1*b494511aSVenki Rajagopalan /*
2*b494511aSVenki Rajagopalan * CDDL HEADER START
3*b494511aSVenki Rajagopalan *
4*b494511aSVenki Rajagopalan * The contents of this file are subject to the terms of the
5*b494511aSVenki Rajagopalan * Common Development and Distribution License (the "License").
6*b494511aSVenki Rajagopalan * You may not use this file except in compliance with the License.
7*b494511aSVenki Rajagopalan *
8*b494511aSVenki Rajagopalan * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*b494511aSVenki Rajagopalan * or http://www.opensolaris.org/os/licensing.
10*b494511aSVenki Rajagopalan * See the License for the specific language governing permissions
11*b494511aSVenki Rajagopalan * and limitations under the License.
12*b494511aSVenki Rajagopalan *
13*b494511aSVenki Rajagopalan * When distributing Covered Code, include this CDDL HEADER in each
14*b494511aSVenki Rajagopalan * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*b494511aSVenki Rajagopalan * If applicable, add the following below this CDDL HEADER, with the
16*b494511aSVenki Rajagopalan * fields enclosed by brackets "[]" replaced with your own identifying
17*b494511aSVenki Rajagopalan * information: Portions Copyright [yyyy] [name of copyright owner]
18*b494511aSVenki Rajagopalan *
19*b494511aSVenki Rajagopalan * CDDL HEADER END
20*b494511aSVenki Rajagopalan */
21*b494511aSVenki Rajagopalan
22*b494511aSVenki Rajagopalan /*
23*b494511aSVenki Rajagopalan * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24*b494511aSVenki Rajagopalan */
25*b494511aSVenki Rajagopalan
26*b494511aSVenki Rajagopalan #include <sys/types.h>
27*b494511aSVenki Rajagopalan #include <sys/kmem.h>
28*b494511aSVenki Rajagopalan #include <sys/conf.h>
29*b494511aSVenki Rajagopalan #include <sys/ddi.h>
30*b494511aSVenki Rajagopalan #include <sys/sunddi.h>
31*b494511aSVenki Rajagopalan #include <sys/sunndi.h>
32*b494511aSVenki Rajagopalan #include <sys/ksynch.h>
33*b494511aSVenki Rajagopalan #include <sys/callb.h>
34*b494511aSVenki Rajagopalan #include <sys/ib/mgt/sm_attr.h> /* SM_INIT_TYPE_REPLY_... */
35*b494511aSVenki Rajagopalan
36*b494511aSVenki Rajagopalan #include <sys/ib/clients/eoib/enx_impl.h>
37*b494511aSVenki Rajagopalan
38*b494511aSVenki Rajagopalan /*
39*b494511aSVenki Rajagopalan * Static function declarations
40*b494511aSVenki Rajagopalan */
41*b494511aSVenki Rajagopalan static void eibnx_gw_is_alive(eibnx_gw_info_t *);
42*b494511aSVenki Rajagopalan static void eibnx_gw_is_aware(eibnx_thr_info_t *, eibnx_gw_info_t *, boolean_t);
43*b494511aSVenki Rajagopalan static void eibnx_process_rx(eibnx_thr_info_t *, ibt_wc_t *, eibnx_wqe_t *);
44*b494511aSVenki Rajagopalan static void eibnx_handle_wcerr(uint8_t, eibnx_wqe_t *, eibnx_thr_info_t *);
45*b494511aSVenki Rajagopalan static void eibnx_handle_login_ack(eibnx_thr_info_t *, uint8_t *);
46*b494511aSVenki Rajagopalan static void eibnx_handle_gw_rebirth(eibnx_thr_info_t *, uint16_t);
47*b494511aSVenki Rajagopalan static void eibnx_handle_gw_info_update(eibnx_thr_info_t *, uint16_t, void *);
48*b494511aSVenki Rajagopalan static int eibnx_replace_portinfo(eibnx_thr_info_t *, ibt_hca_portinfo_t *,
49*b494511aSVenki Rajagopalan uint_t);
50*b494511aSVenki Rajagopalan static void eibnx_handle_port_events(ibt_hca_hdl_t, uint8_t);
51*b494511aSVenki Rajagopalan static void eibnx_handle_hca_attach(ib_guid_t);
52*b494511aSVenki Rajagopalan static void eibnx_handle_hca_detach(ib_guid_t);
53*b494511aSVenki Rajagopalan
54*b494511aSVenki Rajagopalan /*
55*b494511aSVenki Rajagopalan * NDI event handle we need
56*b494511aSVenki Rajagopalan */
57*b494511aSVenki Rajagopalan extern ndi_event_hdl_t enx_ndi_event_hdl;
58*b494511aSVenki Rajagopalan
59*b494511aSVenki Rajagopalan /*
60*b494511aSVenki Rajagopalan * SM's init type reply flags
61*b494511aSVenki Rajagopalan */
62*b494511aSVenki Rajagopalan #define ENX_PORT_ATTR_LOADED(itr) \
63*b494511aSVenki Rajagopalan (((itr) & SM_INIT_TYPE_REPLY_NO_LOAD_REPLY) == 0)
64*b494511aSVenki Rajagopalan #define ENX_PORT_ATTR_NOT_PRESERVED(itr) \
65*b494511aSVenki Rajagopalan (((itr) & SM_INIT_TYPE_PRESERVE_CONTENT_REPLY) == 0)
66*b494511aSVenki Rajagopalan #define ENX_PORT_PRES_NOT_PRESERVED(itr) \
67*b494511aSVenki Rajagopalan (((itr) & SM_INIT_TYPE_PRESERVE_PRESENCE_REPLY) == 0)
68*b494511aSVenki Rajagopalan
69*b494511aSVenki Rajagopalan /*
70*b494511aSVenki Rajagopalan * Port monitor progress flags (all flag values should be non-zero)
71*b494511aSVenki Rajagopalan */
72*b494511aSVenki Rajagopalan #define ENX_MON_LINKSTATE_UP 0x01
73*b494511aSVenki Rajagopalan #define ENX_MON_FOUND_MCGS 0x02
74*b494511aSVenki Rajagopalan #define ENX_MON_SETUP_CQ 0x04
75*b494511aSVenki Rajagopalan #define ENX_MON_SETUP_UD_CHAN 0x08
76*b494511aSVenki Rajagopalan #define ENX_MON_SETUP_BUFS 0x10
77*b494511aSVenki Rajagopalan #define ENX_MON_SETUP_CQ_HDLR 0x20
78*b494511aSVenki Rajagopalan #define ENX_MON_JOINED_MCGS 0x40
79*b494511aSVenki Rajagopalan #define ENX_MON_MULTICAST_SLCT 0x80
80*b494511aSVenki Rajagopalan #define ENX_MON_MAX 0xFF
81*b494511aSVenki Rajagopalan
82*b494511aSVenki Rajagopalan /*
83*b494511aSVenki Rajagopalan * Per-port thread to solicit, monitor and discover EoIB gateways
84*b494511aSVenki Rajagopalan * and create the corresponding EoIB driver instances on the host.
85*b494511aSVenki Rajagopalan */
86*b494511aSVenki Rajagopalan void
eibnx_port_monitor(eibnx_thr_info_t * info)87*b494511aSVenki Rajagopalan eibnx_port_monitor(eibnx_thr_info_t *info)
88*b494511aSVenki Rajagopalan {
89*b494511aSVenki Rajagopalan clock_t solicit_period_ticks;
90*b494511aSVenki Rajagopalan clock_t deadline;
91*b494511aSVenki Rajagopalan kmutex_t ci_lock;
92*b494511aSVenki Rajagopalan callb_cpr_t ci;
93*b494511aSVenki Rajagopalan char thr_name[MAXNAMELEN];
94*b494511aSVenki Rajagopalan
95*b494511aSVenki Rajagopalan (void) snprintf(thr_name, MAXNAMELEN, ENX_PORT_MONITOR,
96*b494511aSVenki Rajagopalan info->ti_pi->p_port_num);
97*b494511aSVenki Rajagopalan
98*b494511aSVenki Rajagopalan mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
99*b494511aSVenki Rajagopalan CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, thr_name);
100*b494511aSVenki Rajagopalan
101*b494511aSVenki Rajagopalan info->ti_progress = 0;
102*b494511aSVenki Rajagopalan
103*b494511aSVenki Rajagopalan /*
104*b494511aSVenki Rajagopalan * If the port is not active yet, wait for a port up event. The
105*b494511aSVenki Rajagopalan * async handler, when it sees a port-up event, is expected to
106*b494511aSVenki Rajagopalan * update the port_monitor's portinfo structure's p_linkstate
107*b494511aSVenki Rajagopalan * and wake us up with ENX_EVENT_LINK_UP.
108*b494511aSVenki Rajagopalan */
109*b494511aSVenki Rajagopalan while (info->ti_pi->p_linkstate != IBT_PORT_ACTIVE) {
110*b494511aSVenki Rajagopalan mutex_enter(&info->ti_event_lock);
111*b494511aSVenki Rajagopalan while ((info->ti_event &
112*b494511aSVenki Rajagopalan (ENX_EVENT_LINK_UP | ENX_EVENT_DIE)) == 0) {
113*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
114*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
115*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
116*b494511aSVenki Rajagopalan
117*b494511aSVenki Rajagopalan cv_wait(&info->ti_event_cv, &info->ti_event_lock);
118*b494511aSVenki Rajagopalan
119*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
120*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
121*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
122*b494511aSVenki Rajagopalan }
123*b494511aSVenki Rajagopalan if (info->ti_event & ENX_EVENT_DIE) {
124*b494511aSVenki Rajagopalan mutex_exit(&info->ti_event_lock);
125*b494511aSVenki Rajagopalan goto port_monitor_exit;
126*b494511aSVenki Rajagopalan }
127*b494511aSVenki Rajagopalan info->ti_event &= (~ENX_EVENT_LINK_UP);
128*b494511aSVenki Rajagopalan mutex_exit(&info->ti_event_lock);
129*b494511aSVenki Rajagopalan }
130*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_LINKSTATE_UP;
131*b494511aSVenki Rajagopalan
132*b494511aSVenki Rajagopalan /*
133*b494511aSVenki Rajagopalan * Locate the multicast groups for sending solicit requests
134*b494511aSVenki Rajagopalan * to the GW and receiving advertisements from the GW. If
135*b494511aSVenki Rajagopalan * either of the mcg is not present, wait for them to be
136*b494511aSVenki Rajagopalan * created by the GW.
137*b494511aSVenki Rajagopalan */
138*b494511aSVenki Rajagopalan while (eibnx_find_mgroups(info) != ENX_E_SUCCESS) {
139*b494511aSVenki Rajagopalan mutex_enter(&info->ti_event_lock);
140*b494511aSVenki Rajagopalan while ((info->ti_event &
141*b494511aSVenki Rajagopalan (ENX_EVENT_MCGS_AVAILABLE | ENX_EVENT_DIE)) == 0) {
142*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
143*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
144*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
145*b494511aSVenki Rajagopalan
146*b494511aSVenki Rajagopalan cv_wait(&info->ti_event_cv, &info->ti_event_lock);
147*b494511aSVenki Rajagopalan
148*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
149*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
150*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
151*b494511aSVenki Rajagopalan }
152*b494511aSVenki Rajagopalan if (info->ti_event & ENX_EVENT_DIE) {
153*b494511aSVenki Rajagopalan mutex_exit(&info->ti_event_lock);
154*b494511aSVenki Rajagopalan goto port_monitor_exit;
155*b494511aSVenki Rajagopalan }
156*b494511aSVenki Rajagopalan info->ti_event &= (~ENX_EVENT_MCGS_AVAILABLE);
157*b494511aSVenki Rajagopalan mutex_exit(&info->ti_event_lock);
158*b494511aSVenki Rajagopalan }
159*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_FOUND_MCGS;
160*b494511aSVenki Rajagopalan
161*b494511aSVenki Rajagopalan /*
162*b494511aSVenki Rajagopalan * Setup a shared CQ
163*b494511aSVenki Rajagopalan */
164*b494511aSVenki Rajagopalan if (eibnx_setup_cq(info) != ENX_E_SUCCESS) {
165*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_setup_cq() failed, terminating "
166*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
167*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
168*b494511aSVenki Rajagopalan goto port_monitor_exit;
169*b494511aSVenki Rajagopalan }
170*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_SETUP_CQ;
171*b494511aSVenki Rajagopalan
172*b494511aSVenki Rajagopalan /*
173*b494511aSVenki Rajagopalan * Setup UD channel
174*b494511aSVenki Rajagopalan */
175*b494511aSVenki Rajagopalan if (eibnx_setup_ud_channel(info) != ENX_E_SUCCESS) {
176*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_setup_ud_channel() failed, terminating "
177*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
178*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
179*b494511aSVenki Rajagopalan goto port_monitor_exit;
180*b494511aSVenki Rajagopalan }
181*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_SETUP_UD_CHAN;
182*b494511aSVenki Rajagopalan
183*b494511aSVenki Rajagopalan /*
184*b494511aSVenki Rajagopalan * Allocate/initialize any tx/rx buffers
185*b494511aSVenki Rajagopalan */
186*b494511aSVenki Rajagopalan if (eibnx_setup_bufs(info) != ENX_E_SUCCESS) {
187*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_setup_bufs() failed, terminating "
188*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
189*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
190*b494511aSVenki Rajagopalan goto port_monitor_exit;
191*b494511aSVenki Rajagopalan }
192*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_SETUP_BUFS;
193*b494511aSVenki Rajagopalan
194*b494511aSVenki Rajagopalan /*
195*b494511aSVenki Rajagopalan * Setup completion handler
196*b494511aSVenki Rajagopalan */
197*b494511aSVenki Rajagopalan if (eibnx_setup_cq_handler(info) != ENX_E_SUCCESS) {
198*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_setup_cq_handler() failed, terminating "
199*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
200*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
201*b494511aSVenki Rajagopalan goto port_monitor_exit;
202*b494511aSVenki Rajagopalan }
203*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_SETUP_CQ_HDLR;
204*b494511aSVenki Rajagopalan
205*b494511aSVenki Rajagopalan /*
206*b494511aSVenki Rajagopalan * Join EoIB multicast groups
207*b494511aSVenki Rajagopalan */
208*b494511aSVenki Rajagopalan if (eibnx_join_mcgs(info) != ENX_E_SUCCESS) {
209*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_join_mcgs() failed, terminating ",
210*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
211*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
212*b494511aSVenki Rajagopalan goto port_monitor_exit;
213*b494511aSVenki Rajagopalan }
214*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_JOINED_MCGS;
215*b494511aSVenki Rajagopalan
216*b494511aSVenki Rajagopalan /*
217*b494511aSVenki Rajagopalan * Send SOLICIT pkt to the EoIB multicast group
218*b494511aSVenki Rajagopalan */
219*b494511aSVenki Rajagopalan if (eibnx_fip_solicit_mcast(info) != ENX_E_SUCCESS) {
220*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("eibnx_fip_solicit_mcast() failed, terminating "
221*b494511aSVenki Rajagopalan "port monitor for (hca_guid=0x%llx, port_num=0x%x)",
222*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
223*b494511aSVenki Rajagopalan goto port_monitor_exit;
224*b494511aSVenki Rajagopalan }
225*b494511aSVenki Rajagopalan info->ti_progress |= ENX_MON_MULTICAST_SLCT;
226*b494511aSVenki Rajagopalan
227*b494511aSVenki Rajagopalan mutex_enter(&info->ti_event_lock);
228*b494511aSVenki Rajagopalan
229*b494511aSVenki Rajagopalan solicit_period_ticks = drv_usectohz(ENX_DFL_SOLICIT_PERIOD_USEC);
230*b494511aSVenki Rajagopalan
231*b494511aSVenki Rajagopalan periodic_solicit:
232*b494511aSVenki Rajagopalan deadline = ddi_get_lbolt() + solicit_period_ticks;
233*b494511aSVenki Rajagopalan while ((info->ti_event & (ENX_EVENT_TIMED_OUT | ENX_EVENT_DIE)) == 0) {
234*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
235*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
236*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
237*b494511aSVenki Rajagopalan
238*b494511aSVenki Rajagopalan if (cv_timedwait(&info->ti_event_cv, &info->ti_event_lock,
239*b494511aSVenki Rajagopalan deadline) == -1) {
240*b494511aSVenki Rajagopalan info->ti_event |= ENX_EVENT_TIMED_OUT;
241*b494511aSVenki Rajagopalan }
242*b494511aSVenki Rajagopalan
243*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
244*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
245*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
246*b494511aSVenki Rajagopalan }
247*b494511aSVenki Rajagopalan
248*b494511aSVenki Rajagopalan if (info->ti_event & ENX_EVENT_DIE) {
249*b494511aSVenki Rajagopalan mutex_exit(&info->ti_event_lock);
250*b494511aSVenki Rajagopalan goto port_monitor_exit;
251*b494511aSVenki Rajagopalan }
252*b494511aSVenki Rajagopalan
253*b494511aSVenki Rajagopalan if (info->ti_event & ENX_EVENT_TIMED_OUT) {
254*b494511aSVenki Rajagopalan if (eibnx_fip_solicit_ucast(info,
255*b494511aSVenki Rajagopalan &solicit_period_ticks) != ENX_E_SUCCESS) {
256*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("failed to send solicit ucast to "
257*b494511aSVenki Rajagopalan "gateways (hca_guid=0x%llx, port_num=0x%x)",
258*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
259*b494511aSVenki Rajagopalan }
260*b494511aSVenki Rajagopalan info->ti_event &= ~ENX_EVENT_TIMED_OUT;
261*b494511aSVenki Rajagopalan }
262*b494511aSVenki Rajagopalan
263*b494511aSVenki Rajagopalan goto periodic_solicit;
264*b494511aSVenki Rajagopalan
265*b494511aSVenki Rajagopalan port_monitor_exit:
266*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_MULTICAST_SLCT) {
267*b494511aSVenki Rajagopalan eibnx_cleanup_port_nodes(info);
268*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_MULTICAST_SLCT);
269*b494511aSVenki Rajagopalan }
270*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_JOINED_MCGS) {
271*b494511aSVenki Rajagopalan eibnx_rb_join_mcgs(info);
272*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_JOINED_MCGS);
273*b494511aSVenki Rajagopalan }
274*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_SETUP_CQ_HDLR) {
275*b494511aSVenki Rajagopalan eibnx_rb_setup_cq_handler(info);
276*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_SETUP_CQ_HDLR);
277*b494511aSVenki Rajagopalan }
278*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_SETUP_BUFS) {
279*b494511aSVenki Rajagopalan eibnx_rb_setup_bufs(info);
280*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_SETUP_BUFS);
281*b494511aSVenki Rajagopalan }
282*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_SETUP_UD_CHAN) {
283*b494511aSVenki Rajagopalan eibnx_rb_setup_ud_channel(info);
284*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_SETUP_UD_CHAN);
285*b494511aSVenki Rajagopalan }
286*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_SETUP_CQ) {
287*b494511aSVenki Rajagopalan eibnx_rb_setup_cq(info);
288*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_SETUP_CQ);
289*b494511aSVenki Rajagopalan }
290*b494511aSVenki Rajagopalan if (info->ti_progress & ENX_MON_FOUND_MCGS) {
291*b494511aSVenki Rajagopalan eibnx_rb_find_mgroups(info);
292*b494511aSVenki Rajagopalan info->ti_progress &= (~ENX_MON_FOUND_MCGS);
293*b494511aSVenki Rajagopalan }
294*b494511aSVenki Rajagopalan
295*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
296*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
297*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
298*b494511aSVenki Rajagopalan }
299*b494511aSVenki Rajagopalan
300*b494511aSVenki Rajagopalan /*
301*b494511aSVenki Rajagopalan * Async subnet notices handler registered with IBTF
302*b494511aSVenki Rajagopalan */
303*b494511aSVenki Rajagopalan /*ARGSUSED*/
304*b494511aSVenki Rajagopalan void
eibnx_subnet_notices_handler(void * arg,ib_gid_t gid,ibt_subnet_event_code_t sn_evcode,ibt_subnet_event_t * sn_event)305*b494511aSVenki Rajagopalan eibnx_subnet_notices_handler(void *arg, ib_gid_t gid,
306*b494511aSVenki Rajagopalan ibt_subnet_event_code_t sn_evcode, ibt_subnet_event_t *sn_event)
307*b494511aSVenki Rajagopalan {
308*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
309*b494511aSVenki Rajagopalan eibnx_thr_info_t *ti;
310*b494511aSVenki Rajagopalan ib_gid_t notice_gid;
311*b494511aSVenki Rajagopalan
312*b494511aSVenki Rajagopalan switch (sn_evcode) {
313*b494511aSVenki Rajagopalan case IBT_SM_EVENT_MCG_CREATED:
314*b494511aSVenki Rajagopalan notice_gid = sn_event->sm_notice_gid;
315*b494511aSVenki Rajagopalan
316*b494511aSVenki Rajagopalan if ((notice_gid.gid_prefix == enx_solicit_mgid.gid_prefix &&
317*b494511aSVenki Rajagopalan notice_gid.gid_guid == enx_solicit_mgid.gid_guid) ||
318*b494511aSVenki Rajagopalan (notice_gid.gid_prefix == enx_advertise_mgid.gid_prefix &&
319*b494511aSVenki Rajagopalan notice_gid.gid_guid == enx_advertise_mgid.gid_guid)) {
320*b494511aSVenki Rajagopalan
321*b494511aSVenki Rajagopalan mutex_enter(&ss->nx_lock);
322*b494511aSVenki Rajagopalan for (ti = ss->nx_thr_info; ti; ti = ti->ti_next) {
323*b494511aSVenki Rajagopalan mutex_enter(&ti->ti_event_lock);
324*b494511aSVenki Rajagopalan ti->ti_event |= ENX_EVENT_MCGS_AVAILABLE;
325*b494511aSVenki Rajagopalan cv_broadcast(&ti->ti_event_cv);
326*b494511aSVenki Rajagopalan mutex_exit(&ti->ti_event_lock);
327*b494511aSVenki Rajagopalan }
328*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_lock);
329*b494511aSVenki Rajagopalan }
330*b494511aSVenki Rajagopalan break;
331*b494511aSVenki Rajagopalan
332*b494511aSVenki Rajagopalan case IBT_SM_EVENT_MCG_DELETED:
333*b494511aSVenki Rajagopalan break;
334*b494511aSVenki Rajagopalan
335*b494511aSVenki Rajagopalan default:
336*b494511aSVenki Rajagopalan break;
337*b494511aSVenki Rajagopalan }
338*b494511aSVenki Rajagopalan }
339*b494511aSVenki Rajagopalan
340*b494511aSVenki Rajagopalan /*
341*b494511aSVenki Rajagopalan * Async event handler registered with IBTF
342*b494511aSVenki Rajagopalan */
343*b494511aSVenki Rajagopalan /*ARGSUSED*/
344*b494511aSVenki Rajagopalan void
eibnx_async_handler(void * clnt_pvt,ibt_hca_hdl_t hca,ibt_async_code_t code,ibt_async_event_t * event)345*b494511aSVenki Rajagopalan eibnx_async_handler(void *clnt_pvt, ibt_hca_hdl_t hca,
346*b494511aSVenki Rajagopalan ibt_async_code_t code, ibt_async_event_t *event)
347*b494511aSVenki Rajagopalan {
348*b494511aSVenki Rajagopalan switch (code) {
349*b494511aSVenki Rajagopalan case IBT_ERROR_CATASTROPHIC_CHAN:
350*b494511aSVenki Rajagopalan case IBT_ERROR_INVALID_REQUEST_CHAN:
351*b494511aSVenki Rajagopalan case IBT_ERROR_ACCESS_VIOLATION_CHAN:
352*b494511aSVenki Rajagopalan case IBT_ERROR_CQ:
353*b494511aSVenki Rajagopalan case IBT_ERROR_CATASTROPHIC_SRQ:
354*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("ibt ERROR event 0x%x received "
355*b494511aSVenki Rajagopalan "(hca_guid=0x%llx)", code, event->ev_hca_guid);
356*b494511aSVenki Rajagopalan break;
357*b494511aSVenki Rajagopalan
358*b494511aSVenki Rajagopalan case IBT_ERROR_PORT_DOWN:
359*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("ibt PORT_DOWN event received "
360*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x)",
361*b494511aSVenki Rajagopalan event->ev_hca_guid, event->ev_port);
362*b494511aSVenki Rajagopalan break;
363*b494511aSVenki Rajagopalan
364*b494511aSVenki Rajagopalan case IBT_EVENT_PORT_UP:
365*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("ibt PORT_UP event received "
366*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x)",
367*b494511aSVenki Rajagopalan event->ev_hca_guid, event->ev_port);
368*b494511aSVenki Rajagopalan eibnx_handle_port_events(hca, event->ev_port);
369*b494511aSVenki Rajagopalan break;
370*b494511aSVenki Rajagopalan
371*b494511aSVenki Rajagopalan case IBT_PORT_CHANGE_EVENT:
372*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("ibt PORT_CHANGE event received "
373*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x)",
374*b494511aSVenki Rajagopalan event->ev_hca_guid, event->ev_port);
375*b494511aSVenki Rajagopalan eibnx_handle_port_events(hca, event->ev_port);
376*b494511aSVenki Rajagopalan break;
377*b494511aSVenki Rajagopalan
378*b494511aSVenki Rajagopalan case IBT_CLNT_REREG_EVENT:
379*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("ibt CLNT_REREG event received "
380*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x)",
381*b494511aSVenki Rajagopalan event->ev_hca_guid, event->ev_port);
382*b494511aSVenki Rajagopalan eibnx_handle_port_events(hca, event->ev_port);
383*b494511aSVenki Rajagopalan break;
384*b494511aSVenki Rajagopalan
385*b494511aSVenki Rajagopalan case IBT_HCA_ATTACH_EVENT:
386*b494511aSVenki Rajagopalan ENX_DPRINTF_VERBOSE("ibt HCA_ATTACH event received "
387*b494511aSVenki Rajagopalan "(new hca_guid=0x%llx)", event->ev_hca_guid);
388*b494511aSVenki Rajagopalan eibnx_handle_hca_attach(event->ev_hca_guid);
389*b494511aSVenki Rajagopalan break;
390*b494511aSVenki Rajagopalan
391*b494511aSVenki Rajagopalan case IBT_HCA_DETACH_EVENT:
392*b494511aSVenki Rajagopalan ENX_DPRINTF_VERBOSE("ibt HCA_DETACH event received "
393*b494511aSVenki Rajagopalan "(target hca_guid=0x%llx)", event->ev_hca_guid);
394*b494511aSVenki Rajagopalan eibnx_handle_hca_detach(event->ev_hca_guid);
395*b494511aSVenki Rajagopalan break;
396*b494511aSVenki Rajagopalan
397*b494511aSVenki Rajagopalan default:
398*b494511aSVenki Rajagopalan ENX_DPRINTF_VERBOSE("ibt UNSUPPORTED event 0x%x received "
399*b494511aSVenki Rajagopalan "(hca_guid=0x%llx)", code, event->ev_hca_guid);
400*b494511aSVenki Rajagopalan break;
401*b494511aSVenki Rajagopalan }
402*b494511aSVenki Rajagopalan }
403*b494511aSVenki Rajagopalan
404*b494511aSVenki Rajagopalan boolean_t
eibnx_is_gw_dead(eibnx_gw_info_t * gwi)405*b494511aSVenki Rajagopalan eibnx_is_gw_dead(eibnx_gw_info_t *gwi)
406*b494511aSVenki Rajagopalan {
407*b494511aSVenki Rajagopalan int64_t cur_lbolt;
408*b494511aSVenki Rajagopalan
409*b494511aSVenki Rajagopalan cur_lbolt = ddi_get_lbolt64();
410*b494511aSVenki Rajagopalan
411*b494511aSVenki Rajagopalan mutex_enter(&gwi->gw_adv_lock);
412*b494511aSVenki Rajagopalan if ((cur_lbolt - gwi->gw_adv_last_lbolt) > gwi->gw_adv_timeout_ticks) {
413*b494511aSVenki Rajagopalan gwi->gw_adv_flag = ENX_GW_DEAD;
414*b494511aSVenki Rajagopalan mutex_exit(&gwi->gw_adv_lock);
415*b494511aSVenki Rajagopalan return (B_TRUE);
416*b494511aSVenki Rajagopalan }
417*b494511aSVenki Rajagopalan mutex_exit(&gwi->gw_adv_lock);
418*b494511aSVenki Rajagopalan
419*b494511aSVenki Rajagopalan return (B_FALSE);
420*b494511aSVenki Rajagopalan }
421*b494511aSVenki Rajagopalan
422*b494511aSVenki Rajagopalan static void
eibnx_gw_is_alive(eibnx_gw_info_t * gwi)423*b494511aSVenki Rajagopalan eibnx_gw_is_alive(eibnx_gw_info_t *gwi)
424*b494511aSVenki Rajagopalan {
425*b494511aSVenki Rajagopalan /*
426*b494511aSVenki Rajagopalan * We've just received a multicast advertisement from this
427*b494511aSVenki Rajagopalan * gateway. Multicast or unicast, this means that the gateway
428*b494511aSVenki Rajagopalan * is alive. Record this timestamp (in ticks).
429*b494511aSVenki Rajagopalan */
430*b494511aSVenki Rajagopalan mutex_enter(&gwi->gw_adv_lock);
431*b494511aSVenki Rajagopalan gwi->gw_adv_last_lbolt = ddi_get_lbolt64();
432*b494511aSVenki Rajagopalan if (gwi->gw_adv_flag == ENX_GW_DEAD) {
433*b494511aSVenki Rajagopalan gwi->gw_adv_flag = ENX_GW_ALIVE;
434*b494511aSVenki Rajagopalan }
435*b494511aSVenki Rajagopalan mutex_exit(&gwi->gw_adv_lock);
436*b494511aSVenki Rajagopalan }
437*b494511aSVenki Rajagopalan
438*b494511aSVenki Rajagopalan static void
eibnx_gw_is_aware(eibnx_thr_info_t * info,eibnx_gw_info_t * gwi,boolean_t gwi_changed)439*b494511aSVenki Rajagopalan eibnx_gw_is_aware(eibnx_thr_info_t *info, eibnx_gw_info_t *gwi,
440*b494511aSVenki Rajagopalan boolean_t gwi_changed)
441*b494511aSVenki Rajagopalan {
442*b494511aSVenki Rajagopalan eib_gw_info_t eib_gwi;
443*b494511aSVenki Rajagopalan boolean_t post_rebirth_event = B_FALSE;
444*b494511aSVenki Rajagopalan
445*b494511aSVenki Rajagopalan /*
446*b494511aSVenki Rajagopalan * We're here when we receive a unicast advertisement from a
447*b494511aSVenki Rajagopalan * gateway. If this gateway was discovered earlier but was in
448*b494511aSVenki Rajagopalan * a dead state, this means it has come back alive and become
449*b494511aSVenki Rajagopalan * aware of us. We may need to inform any EoIB children
450*b494511aSVenki Rajagopalan * waiting for notification. Note that if this gateway is
451*b494511aSVenki Rajagopalan * being discovered for the first time now, we wouldn't have
452*b494511aSVenki Rajagopalan * created the binding eoib node for it (we will do that when
453*b494511aSVenki Rajagopalan * we return from this routine), so the "rebirth" and "gw info
454*b494511aSVenki Rajagopalan * update" event postings will be NOPs.
455*b494511aSVenki Rajagopalan */
456*b494511aSVenki Rajagopalan mutex_enter(&gwi->gw_adv_lock);
457*b494511aSVenki Rajagopalan gwi->gw_adv_last_lbolt = ddi_get_lbolt64();
458*b494511aSVenki Rajagopalan if (gwi->gw_adv_flag != ENX_GW_AWARE) {
459*b494511aSVenki Rajagopalan post_rebirth_event = B_TRUE;
460*b494511aSVenki Rajagopalan }
461*b494511aSVenki Rajagopalan gwi->gw_adv_flag = ENX_GW_AWARE;
462*b494511aSVenki Rajagopalan mutex_exit(&gwi->gw_adv_lock);
463*b494511aSVenki Rajagopalan
464*b494511aSVenki Rajagopalan /*
465*b494511aSVenki Rajagopalan * If we have a gateway information update event, we post that
466*b494511aSVenki Rajagopalan * first, so any rebirth event processed later will have the
467*b494511aSVenki Rajagopalan * correct gateway information.
468*b494511aSVenki Rajagopalan */
469*b494511aSVenki Rajagopalan if (gwi_changed) {
470*b494511aSVenki Rajagopalan eib_gwi.gi_system_guid = gwi->gw_system_guid;
471*b494511aSVenki Rajagopalan eib_gwi.gi_guid = gwi->gw_guid;
472*b494511aSVenki Rajagopalan eib_gwi.gi_sn_prefix = gwi->gw_addr.ga_gid.gid_prefix;
473*b494511aSVenki Rajagopalan eib_gwi.gi_adv_period = gwi->gw_adv_period;
474*b494511aSVenki Rajagopalan eib_gwi.gi_ka_period = gwi->gw_ka_period;
475*b494511aSVenki Rajagopalan eib_gwi.gi_vnic_ka_period = gwi->gw_vnic_ka_period;
476*b494511aSVenki Rajagopalan eib_gwi.gi_ctrl_qpn = gwi->gw_ctrl_qpn;
477*b494511aSVenki Rajagopalan eib_gwi.gi_lid = gwi->gw_lid;
478*b494511aSVenki Rajagopalan eib_gwi.gi_portid = gwi->gw_portid;
479*b494511aSVenki Rajagopalan eib_gwi.gi_num_net_vnics = gwi->gw_num_net_vnics;
480*b494511aSVenki Rajagopalan eib_gwi.gi_flag_available = gwi->gw_flag_available;
481*b494511aSVenki Rajagopalan eib_gwi.gi_is_host_adm_vnics = gwi->gw_is_host_adm_vnics;
482*b494511aSVenki Rajagopalan eib_gwi.gi_sl = gwi->gw_sl;
483*b494511aSVenki Rajagopalan eib_gwi.gi_n_rss_qpn = gwi->gw_n_rss_qpn;
484*b494511aSVenki Rajagopalan bcopy(gwi->gw_system_name, eib_gwi.gi_system_name,
485*b494511aSVenki Rajagopalan EIB_GW_SYSNAME_LEN);
486*b494511aSVenki Rajagopalan bcopy(gwi->gw_port_name, eib_gwi.gi_port_name,
487*b494511aSVenki Rajagopalan EIB_GW_PORTNAME_LEN);
488*b494511aSVenki Rajagopalan bcopy(gwi->gw_vendor_id, eib_gwi.gi_vendor_id,
489*b494511aSVenki Rajagopalan EIB_GW_VENDOR_LEN);
490*b494511aSVenki Rajagopalan
491*b494511aSVenki Rajagopalan eibnx_handle_gw_info_update(info, eib_gwi.gi_portid, &eib_gwi);
492*b494511aSVenki Rajagopalan }
493*b494511aSVenki Rajagopalan if (post_rebirth_event) {
494*b494511aSVenki Rajagopalan eibnx_handle_gw_rebirth(info, gwi->gw_portid);
495*b494511aSVenki Rajagopalan }
496*b494511aSVenki Rajagopalan }
497*b494511aSVenki Rajagopalan
498*b494511aSVenki Rajagopalan /*
499*b494511aSVenki Rajagopalan * Thread to create eoib nodes and online instances
500*b494511aSVenki Rajagopalan */
501*b494511aSVenki Rajagopalan void
eibnx_create_eoib_node(void)502*b494511aSVenki Rajagopalan eibnx_create_eoib_node(void)
503*b494511aSVenki Rajagopalan {
504*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
505*b494511aSVenki Rajagopalan eibnx_nodeq_t *node;
506*b494511aSVenki Rajagopalan kmutex_t ci_lock;
507*b494511aSVenki Rajagopalan callb_cpr_t ci;
508*b494511aSVenki Rajagopalan
509*b494511aSVenki Rajagopalan mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
510*b494511aSVenki Rajagopalan CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, ENX_NODE_CREATOR);
511*b494511aSVenki Rajagopalan
512*b494511aSVenki Rajagopalan wait_for_node_to_create:
513*b494511aSVenki Rajagopalan mutex_enter(&ss->nx_nodeq_lock);
514*b494511aSVenki Rajagopalan
515*b494511aSVenki Rajagopalan while ((ss->nx_nodeq == NULL) && (ss->nx_nodeq_thr_die == 0)) {
516*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
517*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
518*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
519*b494511aSVenki Rajagopalan
520*b494511aSVenki Rajagopalan cv_wait(&ss->nx_nodeq_cv, &ss->nx_nodeq_lock);
521*b494511aSVenki Rajagopalan
522*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
523*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
524*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
525*b494511aSVenki Rajagopalan }
526*b494511aSVenki Rajagopalan
527*b494511aSVenki Rajagopalan /*
528*b494511aSVenki Rajagopalan * If this is not really a work item, but a request for us to
529*b494511aSVenki Rajagopalan * die, throwaway all pending work requests and just die.
530*b494511aSVenki Rajagopalan */
531*b494511aSVenki Rajagopalan if (ss->nx_nodeq_thr_die) {
532*b494511aSVenki Rajagopalan while (ss->nx_nodeq) {
533*b494511aSVenki Rajagopalan node = ss->nx_nodeq;
534*b494511aSVenki Rajagopalan ss->nx_nodeq = node->nc_next;
535*b494511aSVenki Rajagopalan node->nc_next = NULL;
536*b494511aSVenki Rajagopalan
537*b494511aSVenki Rajagopalan kmem_free(node, sizeof (eibnx_nodeq_t));
538*b494511aSVenki Rajagopalan }
539*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_nodeq_lock);
540*b494511aSVenki Rajagopalan
541*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
542*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
543*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
544*b494511aSVenki Rajagopalan
545*b494511aSVenki Rajagopalan return;
546*b494511aSVenki Rajagopalan }
547*b494511aSVenki Rajagopalan
548*b494511aSVenki Rajagopalan /*
549*b494511aSVenki Rajagopalan * Grab the first node entry from the queue
550*b494511aSVenki Rajagopalan */
551*b494511aSVenki Rajagopalan ASSERT(ss->nx_nodeq != NULL);
552*b494511aSVenki Rajagopalan node = ss->nx_nodeq;
553*b494511aSVenki Rajagopalan ss->nx_nodeq = node->nc_next;
554*b494511aSVenki Rajagopalan node->nc_next = NULL;
555*b494511aSVenki Rajagopalan
556*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_nodeq_lock);
557*b494511aSVenki Rajagopalan
558*b494511aSVenki Rajagopalan (void) eibnx_configure_node(node->nc_info, node->nc_gwi, NULL);
559*b494511aSVenki Rajagopalan
560*b494511aSVenki Rajagopalan kmem_free(node, sizeof (eibnx_nodeq_t));
561*b494511aSVenki Rajagopalan goto wait_for_node_to_create;
562*b494511aSVenki Rajagopalan
563*b494511aSVenki Rajagopalan /*NOTREACHED*/
564*b494511aSVenki Rajagopalan }
565*b494511aSVenki Rajagopalan
566*b494511aSVenki Rajagopalan /*
567*b494511aSVenki Rajagopalan * Tx and Rx completion interrupt handler. Guaranteed to be single
568*b494511aSVenki Rajagopalan * threaded and nonreentrant for this CQ.
569*b494511aSVenki Rajagopalan */
570*b494511aSVenki Rajagopalan void
eibnx_comp_intr(ibt_cq_hdl_t cq_hdl,void * arg)571*b494511aSVenki Rajagopalan eibnx_comp_intr(ibt_cq_hdl_t cq_hdl, void *arg)
572*b494511aSVenki Rajagopalan {
573*b494511aSVenki Rajagopalan eibnx_thr_info_t *info = arg;
574*b494511aSVenki Rajagopalan
575*b494511aSVenki Rajagopalan if (info->ti_cq_hdl != cq_hdl) {
576*b494511aSVenki Rajagopalan ENX_DPRINTF_DEBUG("eibnx_comp_intr: "
577*b494511aSVenki Rajagopalan "cq_hdl(0x%llx) != info->ti_cq_hdl(0x%llx), "
578*b494511aSVenki Rajagopalan "ignoring completion", cq_hdl, info->ti_cq_hdl);
579*b494511aSVenki Rajagopalan return;
580*b494511aSVenki Rajagopalan }
581*b494511aSVenki Rajagopalan
582*b494511aSVenki Rajagopalan ASSERT(info->ti_softint_hdl != NULL);
583*b494511aSVenki Rajagopalan
584*b494511aSVenki Rajagopalan (void) ddi_intr_trigger_softint(info->ti_softint_hdl, NULL);
585*b494511aSVenki Rajagopalan }
586*b494511aSVenki Rajagopalan
587*b494511aSVenki Rajagopalan /*
588*b494511aSVenki Rajagopalan * Send and Receive completion handler functions for EoIB nexus
589*b494511aSVenki Rajagopalan */
590*b494511aSVenki Rajagopalan
/*
 * Softint handler for CQ completions, triggered via
 * ddi_intr_trigger_softint() from eibnx_comp_intr().  Drains the CQ and
 * dispatches each work completion.  Returns (uint_t)ENX_E_SUCCESS early
 * if the port monitor is being killed or a completion pass is already
 * marked in progress; DDI_INTR_CLAIMED otherwise.
 */
/*ARGSUSED*/
uint_t
eibnx_comp_handler(caddr_t arg1, caddr_t arg2)
{
	eibnx_thr_info_t *info = (eibnx_thr_info_t *)arg1;
	ibt_wc_t *wc;
	eibnx_wqe_t *wqe;
	ibt_status_t ret;
	uint_t polled;		/* number of wcs returned by each poll */
	int i;

	/*
	 * Make sure the port monitor isn't killed if we're in the completion
	 * handler. If the port monitor thread is already being killed, we'll
	 * stop processing completions.
	 */
	mutex_enter(&info->ti_event_lock);
	if (info->ti_event & (ENX_EVENT_DIE | ENX_EVENT_COMPLETION)) {
		mutex_exit(&info->ti_event_lock);
		return ((uint_t)ENX_E_SUCCESS);
	}
	/* Mark a completion pass in progress; cleared on the way out below */
	info->ti_event |= ENX_EVENT_COMPLETION;
	mutex_exit(&info->ti_event_lock);

	/*
	 * Re-arm the notification callback before we start polling
	 * the completion queue. There's nothing much we can do if the
	 * enable_cq_notify fails - we issue a warning and move on.
	 */
	ret = ibt_enable_cq_notify(info->ti_cq_hdl, IBT_NEXT_COMPLETION);
	if (ret != IBT_SUCCESS) {
		ENX_DPRINTF_WARN("ibt_enable_cq_notify(cq_hdl=0x%llx) "
		    "failed, ret=%d", info->ti_cq_hdl, ret);
	}

	/*
	 * Handle tx and rx completions.  The loop exits once ibt_poll_cq()
	 * returns non-success (presumably IBT_CQ_EMPTY when drained - the
	 * return value is not otherwise examined).  Each wc's wc_id carries
	 * the pointer to its wqe.
	 */
	while ((ret = ibt_poll_cq(info->ti_cq_hdl, info->ti_wc, info->ti_cq_sz,
	    &polled)) == IBT_SUCCESS) {
		for (wc = info->ti_wc, i = 0; i < polled; i++, wc++) {
			wqe = (eibnx_wqe_t *)(uintptr_t)wc->wc_id;
			if (wc->wc_status != IBT_WC_SUCCESS) {
				eibnx_handle_wcerr(wc->wc_status, wqe, info);
			} else if (wqe->qe_type == ENX_QETYP_RWQE) {
				/* rx: process the packet, then repost wqe */
				eibnx_process_rx(info, wc, wqe);
				eibnx_return_rwqe(info, wqe);
			} else {
				/* tx: simply reclaim the send wqe */
				eibnx_return_swqe(wqe);
			}
		}
	}

	/*
	 * On the way out, make sure we wake up any pending death requestor
	 * for the port-monitor thread. Note that we need to do a cv_broadcast()
	 * here since there could be multiple threads sleeping on the event cv
	 * and we want to make sure all waiters get a chance to see if it's
	 * their turn.
	 */
	mutex_enter(&info->ti_event_lock);
	info->ti_event &= (~ENX_EVENT_COMPLETION);
	cv_broadcast(&info->ti_event_cv);
	mutex_exit(&info->ti_event_lock);

	return (DDI_INTR_CLAIMED);
}
658*b494511aSVenki Rajagopalan
659*b494511aSVenki Rajagopalan /*
660*b494511aSVenki Rajagopalan * Rx processing code
661*b494511aSVenki Rajagopalan */
/*
 * Process one received FIP packet: parse it past the GRH, forward login
 * acks to the EoIB child they belong to, and run gateway advertisements
 * through the gateway-discovery state machine below.
 */
static void
eibnx_process_rx(eibnx_thr_info_t *info, ibt_wc_t *wc, eibnx_wqe_t *wqe)
{
	eibnx_gw_msg_t msg;
	eibnx_gw_info_t *gwi;		/* gw info parsed from this packet */
	eibnx_gw_info_t *orig_gwi;	/* matching entry already in gwlist */
	eibnx_gw_info_t *new_gwi;
	uint_t orig_gw_state;
	uint8_t *pkt = (uint8_t *)(uintptr_t)(wqe->qe_sgl.ds_va);
	boolean_t gwi_changed;

	/*
	 * We'll simply drop any packet (including broadcast advertisements
	 * from gws) we receive before we've done our solicitation broadcast.
	 */
	if (info->ti_mcast_done == 0) {
		return;
	}

	/*
	 * Skip the GRH and parse the message in the packet
	 */
	if (eibnx_fip_parse_pkt(pkt + ENX_GRH_SZ, &msg) != ENX_E_SUCCESS) {
		return;
	}

	/*
	 * If it was a login ack for one of our children, we need to pass
	 * it on to the child
	 */
	if (msg.gm_type == FIP_VNIC_LOGIN_ACK) {
		eibnx_handle_login_ack(info, pkt);
		return;
	}

	/*
	 * Other than that, we only handle gateway advertisements
	 */
	if (msg.gm_type != FIP_GW_ADVERTISE_MCAST &&
	    msg.gm_type != FIP_GW_ADVERTISE_UCAST) {
		return;
	}

	gwi = &msg.u.gm_info;

	/*
	 * State machine to create eoib instances. Whether this advertisement
	 * is from a new gateway or an old gateway that we already know about,
	 * if this was a unicast response to our earlier solicitation and it's
	 * the first time we're receiving it from this gateway, we're ready to
	 * login, so we create the EoIB instance for it.
	 */
	orig_gwi = eibnx_find_gw_in_gwlist(info, gwi);
	if (orig_gwi == NULL) {
		/* First advertisement we've seen from this gateway */
		if (gwi->gw_flag_available == 0) {
			/* gw advertised itself unavailable; just record it */
			gwi->gw_state = ENX_GW_STATE_UNAVAILABLE;
			gwi->gw_adv_flag = ENX_GW_ALIVE;
			(void) eibnx_add_gw_to_gwlist(info, gwi, wc, pkt);
		} else if (gwi->gw_flag_ucast_advt == 0) {
			/*
			 * Available, but this was a multicast advertisement,
			 * not a unicast response to our solicitation - so we
			 * can't login yet.
			 */
			gwi->gw_state = ENX_GW_STATE_AVAILABLE;
			gwi->gw_adv_flag = ENX_GW_ALIVE;
			(void) eibnx_add_gw_to_gwlist(info, gwi, wc, pkt);
		} else {
			/*
			 * Unicast response to our solicitation: record the gw
			 * and queue it for EoIB instance creation.
			 */
			gwi->gw_state = ENX_GW_STATE_READY_TO_LOGIN;
			gwi->gw_adv_flag = ENX_GW_AWARE;
			if ((new_gwi = eibnx_add_gw_to_gwlist(info, gwi,
			    wc, pkt)) != NULL) {
				eibnx_queue_for_creation(info, new_gwi);
			}
		}
	} else {
		/* Known gateway: refresh its entry and advance its state */
		orig_gw_state = orig_gwi->gw_state;
		if (gwi->gw_flag_available == 0) {
			gwi->gw_state = ENX_GW_STATE_UNAVAILABLE;
			eibnx_replace_gw_in_gwlist(info, orig_gwi, gwi,
			    wc, pkt, NULL);
			eibnx_gw_is_alive(orig_gwi);

		} else if (gwi->gw_flag_ucast_advt == 0) {
			/* mcast advt only promotes a gw out of UNAVAILABLE */
			if (orig_gw_state == ENX_GW_STATE_UNAVAILABLE) {
				gwi->gw_state = ENX_GW_STATE_AVAILABLE;
			} else {
				gwi->gw_state = orig_gw_state;
			}
			eibnx_replace_gw_in_gwlist(info, orig_gwi, gwi,
			    wc, pkt, NULL);
			eibnx_gw_is_alive(orig_gwi);

		} else {
			/*
			 * Unicast advt: mark the gw aware of us; on the first
			 * transition into READY_TO_LOGIN, queue the gw for
			 * EoIB instance creation.
			 */
			gwi->gw_state = ENX_GW_STATE_READY_TO_LOGIN;
			eibnx_replace_gw_in_gwlist(info, orig_gwi, gwi,
			    wc, pkt, &gwi_changed);
			eibnx_gw_is_aware(info, orig_gwi, gwi_changed);

			if (orig_gw_state != ENX_GW_STATE_READY_TO_LOGIN)
				eibnx_queue_for_creation(info, orig_gwi);
		}
	}
}
761*b494511aSVenki Rajagopalan
762*b494511aSVenki Rajagopalan /*ARGSUSED*/
763*b494511aSVenki Rajagopalan static void
eibnx_handle_wcerr(uint8_t wcerr,eibnx_wqe_t * wqe,eibnx_thr_info_t * info)764*b494511aSVenki Rajagopalan eibnx_handle_wcerr(uint8_t wcerr, eibnx_wqe_t *wqe, eibnx_thr_info_t *info)
765*b494511aSVenki Rajagopalan {
766*b494511aSVenki Rajagopalan /*
767*b494511aSVenki Rajagopalan * Currently, all we do is report
768*b494511aSVenki Rajagopalan */
769*b494511aSVenki Rajagopalan switch (wcerr) {
770*b494511aSVenki Rajagopalan case IBT_WC_WR_FLUSHED_ERR:
771*b494511aSVenki Rajagopalan ENX_DPRINTF_VERBOSE("IBT_WC_WR_FLUSHED_ERR seen "
772*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
773*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num, wqe->qe_type);
774*b494511aSVenki Rajagopalan break;
775*b494511aSVenki Rajagopalan
776*b494511aSVenki Rajagopalan case IBT_WC_LOCAL_CHAN_OP_ERR:
777*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("IBT_WC_LOCAL_CHAN_OP_ERR seen "
778*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
779*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num, wqe->qe_type);
780*b494511aSVenki Rajagopalan break;
781*b494511aSVenki Rajagopalan
782*b494511aSVenki Rajagopalan case IBT_WC_LOCAL_PROTECT_ERR:
783*b494511aSVenki Rajagopalan ENX_DPRINTF_ERR("IBT_WC_LOCAL_PROTECT_ERR seen "
784*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x, wqe_type=0x%x)",
785*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num, wqe->qe_type);
786*b494511aSVenki Rajagopalan break;
787*b494511aSVenki Rajagopalan }
788*b494511aSVenki Rajagopalan }
789*b494511aSVenki Rajagopalan
790*b494511aSVenki Rajagopalan static void
eibnx_handle_login_ack(eibnx_thr_info_t * info,uint8_t * pkt)791*b494511aSVenki Rajagopalan eibnx_handle_login_ack(eibnx_thr_info_t *info, uint8_t *pkt)
792*b494511aSVenki Rajagopalan {
793*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
794*b494511aSVenki Rajagopalan fip_login_ack_t *ack;
795*b494511aSVenki Rajagopalan fip_desc_vnic_login_t *login;
796*b494511aSVenki Rajagopalan ddi_eventcookie_t cookie;
797*b494511aSVenki Rajagopalan dev_info_t *rdip;
798*b494511aSVenki Rajagopalan uint16_t vnic_id;
799*b494511aSVenki Rajagopalan uint16_t inst;
800*b494511aSVenki Rajagopalan int ret;
801*b494511aSVenki Rajagopalan
802*b494511aSVenki Rajagopalan /*
803*b494511aSVenki Rajagopalan * When we get login acknowledgements, we simply invoke the
804*b494511aSVenki Rajagopalan * appropriate EoIB driver callback to process it on behalf
805*b494511aSVenki Rajagopalan * of the driver instance. We will let the callback do error
806*b494511aSVenki Rajagopalan * checks.
807*b494511aSVenki Rajagopalan */
808*b494511aSVenki Rajagopalan ack = (fip_login_ack_t *)(pkt + ENX_GRH_SZ);
809*b494511aSVenki Rajagopalan login = &(ack->ak_vnic_login);
810*b494511aSVenki Rajagopalan vnic_id = ntohs(login->vl_vnic_id);
811*b494511aSVenki Rajagopalan inst = EIB_DEVI_INSTANCE(vnic_id);
812*b494511aSVenki Rajagopalan
813*b494511aSVenki Rajagopalan if ((rdip = eibnx_find_child_dip_by_inst(info, inst)) == NULL) {
814*b494511aSVenki Rajagopalan ENX_DPRINTF_DEBUG("no eoib child with instance 0x%x found "
815*b494511aSVenki Rajagopalan "for (hca_guid=0x%llx, port_num=0x%x)", inst,
816*b494511aSVenki Rajagopalan info->ti_hca_guid, info->ti_pi->p_port_num);
817*b494511aSVenki Rajagopalan return;
818*b494511aSVenki Rajagopalan }
819*b494511aSVenki Rajagopalan
820*b494511aSVenki Rajagopalan ret = ndi_event_retrieve_cookie(enx_ndi_event_hdl, rdip,
821*b494511aSVenki Rajagopalan EIB_NDI_EVENT_LOGIN_ACK, &cookie, NDI_EVENT_NOPASS);
822*b494511aSVenki Rajagopalan if (ret != NDI_SUCCESS) {
823*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("no login-ack cookie for (hca_guid=0x%llx, "
824*b494511aSVenki Rajagopalan "port_num=0x%x, eoib_inst=0x%x), ret=%d", info->ti_hca_guid,
825*b494511aSVenki Rajagopalan info->ti_pi->p_port_num, inst, ret);
826*b494511aSVenki Rajagopalan return;
827*b494511aSVenki Rajagopalan }
828*b494511aSVenki Rajagopalan
829*b494511aSVenki Rajagopalan (void) ndi_post_event(ss->nx_dip, rdip, cookie, (void *)pkt);
830*b494511aSVenki Rajagopalan }
831*b494511aSVenki Rajagopalan
832*b494511aSVenki Rajagopalan static void
eibnx_handle_gw_rebirth(eibnx_thr_info_t * info,uint16_t portid)833*b494511aSVenki Rajagopalan eibnx_handle_gw_rebirth(eibnx_thr_info_t *info, uint16_t portid)
834*b494511aSVenki Rajagopalan {
835*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
836*b494511aSVenki Rajagopalan ddi_eventcookie_t cookie;
837*b494511aSVenki Rajagopalan dev_info_t *rdip;
838*b494511aSVenki Rajagopalan int ret;
839*b494511aSVenki Rajagopalan
840*b494511aSVenki Rajagopalan if ((rdip = eibnx_find_child_dip_by_gw(info, portid)) == NULL) {
841*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("no eoib child bound to gw portid 0x%x "
842*b494511aSVenki Rajagopalan "found for (hca_guid=0x%llx, port_num=0x%x)",
843*b494511aSVenki Rajagopalan portid, info->ti_hca_guid, info->ti_pi->p_port_num);
844*b494511aSVenki Rajagopalan return;
845*b494511aSVenki Rajagopalan }
846*b494511aSVenki Rajagopalan
847*b494511aSVenki Rajagopalan ret = ndi_event_retrieve_cookie(enx_ndi_event_hdl, rdip,
848*b494511aSVenki Rajagopalan EIB_NDI_EVENT_GW_AVAILABLE, &cookie, NDI_EVENT_NOPASS);
849*b494511aSVenki Rajagopalan if (ret != NDI_SUCCESS) {
850*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("no gw-available cookie for (hca_guid=0x%llx, "
851*b494511aSVenki Rajagopalan "port_num=0x%x, gw_portid=0x%x), ret=%d", info->ti_hca_guid,
852*b494511aSVenki Rajagopalan info->ti_pi->p_port_num, portid, ret);
853*b494511aSVenki Rajagopalan return;
854*b494511aSVenki Rajagopalan }
855*b494511aSVenki Rajagopalan
856*b494511aSVenki Rajagopalan (void) ndi_post_event(ss->nx_dip, rdip, cookie, NULL);
857*b494511aSVenki Rajagopalan }
858*b494511aSVenki Rajagopalan
859*b494511aSVenki Rajagopalan static void
eibnx_handle_gw_info_update(eibnx_thr_info_t * info,uint16_t portid,void * new_gw_info)860*b494511aSVenki Rajagopalan eibnx_handle_gw_info_update(eibnx_thr_info_t *info, uint16_t portid,
861*b494511aSVenki Rajagopalan void *new_gw_info)
862*b494511aSVenki Rajagopalan {
863*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
864*b494511aSVenki Rajagopalan ddi_eventcookie_t cookie;
865*b494511aSVenki Rajagopalan dev_info_t *rdip;
866*b494511aSVenki Rajagopalan int ret;
867*b494511aSVenki Rajagopalan
868*b494511aSVenki Rajagopalan if ((rdip = eibnx_find_child_dip_by_gw(info, portid)) == NULL) {
869*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("no eoib child bound to gw portid 0x%x "
870*b494511aSVenki Rajagopalan "found for (hca_guid=0x%llx, port_num=0x%x)",
871*b494511aSVenki Rajagopalan portid, info->ti_hca_guid, info->ti_pi->p_port_num);
872*b494511aSVenki Rajagopalan return;
873*b494511aSVenki Rajagopalan }
874*b494511aSVenki Rajagopalan
875*b494511aSVenki Rajagopalan ret = ndi_event_retrieve_cookie(enx_ndi_event_hdl, rdip,
876*b494511aSVenki Rajagopalan EIB_NDI_EVENT_GW_INFO_UPDATE, &cookie, NDI_EVENT_NOPASS);
877*b494511aSVenki Rajagopalan if (ret != NDI_SUCCESS) {
878*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("no gw-info-update cookie for "
879*b494511aSVenki Rajagopalan "(hca_guid=0x%llx, port_num=0x%x, gw_portid=0x%x), "
880*b494511aSVenki Rajagopalan "ret=%d", info->ti_hca_guid, info->ti_pi->p_port_num,
881*b494511aSVenki Rajagopalan portid, ret);
882*b494511aSVenki Rajagopalan return;
883*b494511aSVenki Rajagopalan }
884*b494511aSVenki Rajagopalan
885*b494511aSVenki Rajagopalan (void) ndi_post_event(ss->nx_dip, rdip, cookie, new_gw_info);
886*b494511aSVenki Rajagopalan }
887*b494511aSVenki Rajagopalan
888*b494511aSVenki Rajagopalan static int
eibnx_replace_portinfo(eibnx_thr_info_t * ti,ibt_hca_portinfo_t * new_pi,uint_t new_size_pi)889*b494511aSVenki Rajagopalan eibnx_replace_portinfo(eibnx_thr_info_t *ti, ibt_hca_portinfo_t *new_pi,
890*b494511aSVenki Rajagopalan uint_t new_size_pi)
891*b494511aSVenki Rajagopalan {
892*b494511aSVenki Rajagopalan eibnx_t *ss = enx_global_ss;
893*b494511aSVenki Rajagopalan eibnx_hca_t *hca;
894*b494511aSVenki Rajagopalan eibnx_port_t *port;
895*b494511aSVenki Rajagopalan
896*b494511aSVenki Rajagopalan mutex_enter(&ss->nx_lock);
897*b494511aSVenki Rajagopalan
898*b494511aSVenki Rajagopalan for (hca = ss->nx_hca; hca; hca = hca->hc_next) {
899*b494511aSVenki Rajagopalan if (hca->hc_hdl == ti->ti_hca)
900*b494511aSVenki Rajagopalan break;
901*b494511aSVenki Rajagopalan }
902*b494511aSVenki Rajagopalan
903*b494511aSVenki Rajagopalan if (hca == NULL) {
904*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("hca hdl (0x%llx) not found in hca list",
905*b494511aSVenki Rajagopalan ti->ti_hca);
906*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_lock);
907*b494511aSVenki Rajagopalan return (ENX_E_FAILURE);
908*b494511aSVenki Rajagopalan }
909*b494511aSVenki Rajagopalan
910*b494511aSVenki Rajagopalan for (port = hca->hc_port; port; port = port->po_next) {
911*b494511aSVenki Rajagopalan if (port->po_pi == ti->ti_pi) {
912*b494511aSVenki Rajagopalan ibt_free_portinfo(port->po_pi, port->po_pi_size);
913*b494511aSVenki Rajagopalan port->po_pi = new_pi;
914*b494511aSVenki Rajagopalan port->po_pi_size = new_size_pi;
915*b494511aSVenki Rajagopalan ti->ti_pi = port->po_pi;
916*b494511aSVenki Rajagopalan break;
917*b494511aSVenki Rajagopalan }
918*b494511aSVenki Rajagopalan }
919*b494511aSVenki Rajagopalan
920*b494511aSVenki Rajagopalan if (port == NULL) {
921*b494511aSVenki Rajagopalan ENX_DPRINTF_WARN("portinfo (0x%llx) not found in hca list",
922*b494511aSVenki Rajagopalan ti->ti_pi);
923*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_lock);
924*b494511aSVenki Rajagopalan return (ENX_E_FAILURE);
925*b494511aSVenki Rajagopalan }
926*b494511aSVenki Rajagopalan
927*b494511aSVenki Rajagopalan mutex_exit(&ss->nx_lock);
928*b494511aSVenki Rajagopalan
929*b494511aSVenki Rajagopalan return (ENX_E_SUCCESS);
930*b494511aSVenki Rajagopalan }
931*b494511aSVenki Rajagopalan
/*
 * Handle a port event on the given HCA and port number: requery the
 * port, refresh our saved portinfo if the base lid changed, wake up a
 * port monitor waiting for link-up, and rejoin our mcgs if the SM
 * indicates the membership was not preserved.
 */
static void
eibnx_handle_port_events(ibt_hca_hdl_t ev_hca, uint8_t ev_portnum)
{
	eibnx_t *ss = enx_global_ss;
	eibnx_thr_info_t *ti;
	ibt_hca_portinfo_t *pi;
	ibt_status_t ret;
	uint_t num_pi;
	uint_t size_pi;
	uint8_t itr;		/* init-type-reply from the new portinfo */

	/*
	 * Find the port monitor thread that matches the event hca and
	 * portnum
	 */
	mutex_enter(&ss->nx_lock);
	for (ti = ss->nx_thr_info; ti; ti = ti->ti_next) {
		if ((ti->ti_hca == ev_hca) &&
		    (ti->ti_pi->p_port_num == ev_portnum)) {
			break;
		}
	}
	mutex_exit(&ss->nx_lock);

	/* No monitor for this port - nothing to do */
	if (ti == NULL)
		return;

	/*
	 * See if we need to rejoin the mcgs for this port and do so if true
	 */
	ret = ibt_query_hca_ports(ev_hca, ev_portnum, &pi, &num_pi, &size_pi);
	if (ret != IBT_SUCCESS) {
		ENX_DPRINTF_WARN("ibt_query_hca_ports() failed with %d", ret);
		return;
	} else if (num_pi != 1 || pi->p_linkstate != IBT_PORT_ACTIVE) {
		ENX_DPRINTF_WARN("ibt_query_hca_ports(port_num=%d) failed, "
		    "num_pi=%d, linkstate=0x%x", ev_portnum, num_pi,
		    pi->p_linkstate);
		ibt_free_portinfo(pi, size_pi);
		return;
	}

	/*
	 * Decode the SM's init-type-reply via the ENX_PORT_ATTR_* macros
	 * (exact bit semantics live with those macro definitions).
	 */
	itr = pi->p_init_type_reply;
	if (ENX_PORT_ATTR_LOADED(itr) && ENX_PORT_ATTR_NOT_PRESERVED(itr)) {
		/*
		 * If our port's base lid has changed, we need to replace
		 * the saved portinfo in our lists with the new one before
		 * going further.
		 */
		if (ti->ti_pi->p_base_lid != pi->p_base_lid) {
			if (eibnx_replace_portinfo(ti, pi, size_pi) ==
			    ENX_E_SUCCESS) {
				/*
				 * Ownership of pi has passed to the port
				 * list; clear it so it isn't freed below.
				 */
				pi = NULL;
				size_pi = 0;
			}
		}
	}

	/*
	 * If the port monitor was stuck waiting for the link to come up,
	 * let it know that it is up now.
	 */
	mutex_enter(&ti->ti_event_lock);
	if ((ti->ti_progress & ENX_MON_LINKSTATE_UP) != ENX_MON_LINKSTATE_UP) {
		ti->ti_pi->p_linkstate = IBT_PORT_ACTIVE;
		ti->ti_event |= ENX_EVENT_LINK_UP;
		cv_broadcast(&ti->ti_event_cv);
	}
	mutex_exit(&ti->ti_event_lock);

	/* Rejoin mcgs only if we had joined them in the first place */
	if (ENX_PORT_PRES_NOT_PRESERVED(itr)) {
		if (ti->ti_progress & ENX_MON_JOINED_MCGS)
			(void) eibnx_rejoin_mcgs(ti);
	}

	/* Free the queried portinfo unless ownership was handed off above */
	if (pi != NULL)
		ibt_free_portinfo(pi, size_pi);
}
1010*b494511aSVenki Rajagopalan
/*
 * Handle the attach of a new HCA: prepare it, link it into our hca
 * list and start a port monitor for each of its ports.
 */
static void
eibnx_handle_hca_attach(ib_guid_t new_hca_guid)
{
	eibnx_t *ss = enx_global_ss;
	eibnx_thr_info_t *ti;
	eibnx_hca_t *hca;
	eibnx_port_t *port;

	/*
	 * All we need to do is to start a port monitor for all the ports
	 * on the new HCA. To do this, go through our current port monitors
	 * and see if we already have a monitor for this HCA - if so, print
	 * a warning and return.
	 */
	mutex_enter(&ss->nx_lock);
	for (ti = ss->nx_thr_info; ti; ti = ti->ti_next) {
		if (ti->ti_hca_guid == new_hca_guid) {
			ENX_DPRINTF_VERBOSE("hca (guid=0x%llx) already "
			    "attached", new_hca_guid);
			mutex_exit(&ss->nx_lock);
			return;
		}
	}
	mutex_exit(&ss->nx_lock);

	/*
	 * If we don't have it in our list, process the HCA and start the
	 * port monitors
	 *
	 * NOTE(review): nx_lock is dropped between the duplicate check
	 * above and the insertion below, so two concurrent attach
	 * notifications for the same guid could presumably both get here;
	 * confirm that HCA attach events are delivered serially.
	 */
	if ((hca = eibnx_prepare_hca(new_hca_guid)) != NULL) {
		mutex_enter(&ss->nx_lock);

		/* New hca goes at the head of the hca list */
		hca->hc_next = ss->nx_hca;
		ss->nx_hca = hca;

		for (port = hca->hc_port; port; port = port->po_next) {
			ti = eibnx_start_port_monitor(hca, port);

			/* Each new monitor goes at the head of the list */
			ti->ti_next = ss->nx_thr_info;
			ss->nx_thr_info = ti;
		}
		mutex_exit(&ss->nx_lock);
	}
}
1055*b494511aSVenki Rajagopalan
/*
 * Handle the detach of an HCA: move all port-monitor threads for this
 * HCA off the global list and stop them, then unlink the HCA from our
 * hca list and release its resources.
 */
static void
eibnx_handle_hca_detach(ib_guid_t del_hca_guid)
{
	eibnx_t *ss = enx_global_ss;
	eibnx_thr_info_t *ti;
	eibnx_thr_info_t *ti_stop_list = NULL;	/* monitors to be stopped */
	eibnx_thr_info_t *ti_prev;
	eibnx_thr_info_t *ti_next;
	eibnx_hca_t *hca;
	eibnx_hca_t *hca_prev;

	/*
	 * We need to locate all monitor threads for this HCA and stop them
	 */
	mutex_enter(&ss->nx_lock);
	ti_prev = NULL;
	for (ti = ss->nx_thr_info; ti; ti = ti_next) {
		ti_next = ti->ti_next;

		if (ti->ti_hca_guid != del_hca_guid) {
			ti_prev = ti;
		} else {
			/*
			 * Take it out from the good list
			 */
			if (ti_prev)
				ti_prev->ti_next = ti_next;
			else
				ss->nx_thr_info = ti_next;

			/*
			 * And put it in the to-stop list
			 */
			ti->ti_next = ti_stop_list;
			ti_stop_list = ti;
		}
	}
	mutex_exit(&ss->nx_lock);

	/*
	 * Ask all the port_monitor threads to die.  This is done outside
	 * nx_lock, after the monitors are off the global list.
	 */
	for (ti = ti_stop_list; ti; ti = ti_next) {
		ti_next = ti->ti_next;
		eibnx_stop_port_monitor(ti);
	}

	/*
	 * Now, locate the HCA in our list and release all HCA related
	 * resources.
	 */
	mutex_enter(&ss->nx_lock);
	hca_prev = NULL;
	for (hca = ss->nx_hca; hca; hca = hca->hc_next) {
		if (hca->hc_guid != del_hca_guid) {
			hca_prev = hca;
		} else {
			/* Unlink the hca from the list and stop scanning */
			if (hca_prev) {
				hca_prev->hc_next = hca->hc_next;
			} else {
				ss->nx_hca = hca->hc_next;
			}
			hca->hc_next = NULL;
			break;
		}
	}
	mutex_exit(&ss->nx_lock);

	/* Cleanup happens outside nx_lock, once the hca is unlinked */
	if (hca) {
		(void) eibnx_cleanup_hca(hca);
	}
}
1128