xref: /illumos-gate/usr/src/uts/common/io/bnx/bnxint.c (revision eef4f27b)
/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Joyent, Inc.
 */

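/*
 * This file implements interrupt support for the bnx driver: allocation and
 * teardown of the DDI interrupt handle (bnxIntrInit/bnxIntrFini), the
 * enable/disable paths used while the interface is brought up and down, and
 * the hardware ISR along with the helpers that consume receive, transmit,
 * and PHY events.
 */
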
#include "bnxint.h"
#include "bnxsnd.h"
#include "bnxrcv.h"


#define	BNX_INTR_NUMBER 0

/*
 * Name:    bnx_intr_priv
 *
 * Input:   ptr to um_device_t
 *
 * Return:  Interrupt status
 *
 * Description:
 *          This routine is called from the ISR and POLL API routines to
 *          consume any pending events.  It determines whether any status is
 *          pending and calls the corresponding LM functions to consume the
 *          event.  The L2 driver consumes three events: L2 Tx completion,
 *          L2 Rx indication, and link status change.
 */
static lm_interrupt_status_t
bnx_intr_priv(um_device_t *const umdevice)
{
	u32_t idx;
	lm_device_t *lmdevice;
	lm_interrupt_status_t intrstat;

	lmdevice = &(umdevice->lm_dev);

	/*
	 * The following LM routine checks for pending interrupts and
	 * returns the corresponding bits set in a 32-bit integer value.
	 */
	intrstat = lm_get_interrupt_status(lmdevice);

	if (intrstat & LM_KNOCK_KNOCK_EVENT) {
		um_send_driver_pulse(umdevice);
	}

	if (intrstat & LM_RX_EVENT_MASK) {
		for (idx = RX_CHAIN_IDX0; idx < NUM_RX_CHAIN; idx++) {
			if (intrstat & (LM_RX0_EVENT_ACTIVE << idx)) {
				s_list_t *waitq;

				waitq = &(_RX_QINFO(umdevice, idx).waitq);

				mutex_enter(&umdevice->os_param.rcv_mutex);
				(void) lm_get_packets_rcvd(lmdevice, idx, 0,
				    waitq);
				mutex_exit(&umdevice->os_param.rcv_mutex);
			}
		}
	}

	if (intrstat & LM_TX_EVENT_MASK) {
		for (idx = TX_CHAIN_IDX0; idx < NUM_TX_CHAIN; idx++) {
			if (intrstat & (LM_TX0_EVENT_ACTIVE << idx)) {
				/* This call is mutex protected internally. */
				bnx_xmit_ring_intr(umdevice, idx);
			}
		}
	}

	if (intrstat & LM_PHY_EVENT_ACTIVE) {
		mutex_enter(&umdevice->os_param.phy_mutex);
		lm_service_phy_int(lmdevice, FALSE);
		mutex_exit(&umdevice->os_param.phy_mutex);
	}

	return (intrstat);
}

/*
 * Description:
 *
 * This function sends rx traffic up the stack and replenishes the hardware
 * rx buffers.  Although we share the responsibility of replenishing the
 * rx buffers with the timer, we still need to wait here indefinitely.  This
 * is the only place where we send rx traffic back up the stack.
 *
 * We go through a lot of mental gymnastics to make sure we are not holding a
 * lock while calling gld_recv().  We can deadlock in the following scenario
 * if we aren't careful:
 *
 * Thread 1:
 *          bnx_intr_disable()
 *              bnx_intr_wait()
 *                  mutex_enter(intr_*_mutex)
 *
 * Thread 2:
 *          bnx_intr_[soft|1lvl]()
 *              bnx_intr_recv()
 *                  mutex_enter(rcv_mutex)
 *
 * Thread 3:
 *          bnx_intr_[soft|1lvl]()
 *              mutex_enter(intr_*_mutex)
 *              mutex_enter(rcv_mutex)
 *
 * Return:
 */
static void
bnx_intr_recv(um_device_t * const umdevice)
{
	mutex_enter(&umdevice->os_param.rcv_mutex);

	if (umdevice->intr_enabled == B_TRUE) {
		/*
		 * Send the rx packets up.  This function will release and
		 * acquire the receive mutex across calls to gld_recv().
		 */
		bnx_rxpkts_intr(umdevice);
	}

	/*
	 * Since gld_recv() can hang while decommissioning the driver, we
	 * need to double check that interrupts are still enabled before
	 * attempting to replenish the rx buffers.
	 */
	if (umdevice->intr_enabled == B_TRUE) {
		/* This function does an implicit *_fill(). */
		bnx_rxpkts_post(umdevice);
	}

	mutex_exit(&umdevice->os_param.rcv_mutex);
}

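/*
 * Description:
 *
 * This function completes transmit processing for the ISR.  Under the
 * transmit mutex, and only while interrupts are still enabled, it hands the
 * packets on the tx wait queue back to the GLD via bnx_txpkts_intr().
 *
 * Return:
 */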
static void
bnx_intr_xmit(um_device_t *const umdevice)
{
	mutex_enter(&umdevice->os_param.xmit_mutex);

	if (umdevice->intr_enabled == B_TRUE) {
		/*
		 * Send the tx packets in waitq & notify the GLD.
		 */
		bnx_txpkts_intr(umdevice);
	}

	mutex_exit(&umdevice->os_param.xmit_mutex);
}

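/*
 * Description:
 *
 * This is the hardware interrupt service routine registered via
 * ddi_intr_add_handler().  It verifies that the interrupt belongs to this
 * device, masks further hardware interrupts, consumes pending events through
 * bnx_intr_priv(), acknowledges the processed status index to re-enable
 * interrupts, and then completes any outstanding rx/tx work.
 *
 * Return:  DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 */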
static unsigned int
bnx_intr_1lvl(caddr_t arg1, caddr_t arg2)
{
	lm_device_t *lmdevice;
	um_device_t *umdevice;
	lm_interrupt_status_t intrstat = 0;
	u32_t value32;

	umdevice = (um_device_t *)arg1;
	lmdevice = &(umdevice->lm_dev);

	mutex_enter(&umdevice->intr_mutex);

	if (umdevice->intr_enabled != B_TRUE) {
		/*
		 * The interrupt cannot be ours.  Interrupts
		 * from our device have been disabled.
		 */
		mutex_exit(&umdevice->intr_mutex);
		umdevice->intr_in_disabled++;
		return (DDI_INTR_UNCLAIMED);
	}

	/* Make sure we are working with current data. */
	(void) ddi_dma_sync(*(umdevice->os_param.status_block_dma_hdl), 0,
	    STATUS_BLOCK_BUFFER_SIZE, DDI_DMA_SYNC_FORKERNEL);

	/* Make sure it is our device that is interrupting. */
	if (lmdevice->vars.status_virt->deflt.status_idx ==
	    umdevice->dev_var.processed_status_idx) {
		/*
		 * It is possible that we could have arrived at the ISR
		 * before the status block had a chance to be DMA'd into
		 * host memory.  Reading the status of the INTA line will
		 * implicitly force the DMA, and inform us of whether we
		 * are truly interrupting.  INTA is active low.
		 */
		REG_RD(lmdevice, pci_config.pcicfg_misc_status, &value32);
		if (value32 & PCICFG_MISC_STATUS_INTA_VALUE) {
			/* This isn't our interrupt. */
			umdevice->intr_no_change++;
			mutex_exit(&umdevice->intr_mutex);
			return (DDI_INTR_UNCLAIMED);
		}
	}

	umdevice->intrFired++;

	/* Disable interrupt and enqueue soft intr processing. */
	REG_WR(lmdevice, pci_config.pcicfg_int_ack_cmd,
	    (PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	    PCICFG_INT_ACK_CMD_MASK_INT));

	FLUSHPOSTEDWRITES(lmdevice);

	umdevice->dev_var.processed_status_idx =
	    lmdevice->vars.status_virt->deflt.status_idx;

	/* Service the interrupts. */
	intrstat = bnx_intr_priv(umdevice);

	value32 = umdevice->dev_var.processed_status_idx;
	value32 |= PCICFG_INT_ACK_CMD_INDEX_VALID;

	/*
	 * Inform the hardware of the last interrupt event we processed
	 * and reinstate the hardware's ability to assert interrupts.
	 */
	REG_WR(lmdevice, pci_config.pcicfg_int_ack_cmd, value32);

	FLUSHPOSTEDWRITES(lmdevice);

	umdevice->intr_count++;

	if (intrstat & LM_RX_EVENT_MASK) {
		bnx_intr_recv(umdevice);
	}

	if (intrstat & LM_TX_EVENT_MASK) {
		bnx_intr_xmit(umdevice);
	}

	mutex_exit(&umdevice->intr_mutex);

	return (DDI_INTR_CLAIMED);
}

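/*
 * Description:
 *
 * This function enables the driver's interrupt handle and then allows the
 * hardware to generate interrupts.  intr_enabled is set first so that the
 * ISR and its helpers are permitted to touch the hardware.
 *
 * Return:
 */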
void
bnx_intr_enable(um_device_t * const umdevice)
{
	int rc;

	umdevice->intr_count = 0;

	/*
	 * Allow interrupts to touch the hardware.
	 */
	umdevice->intr_enabled = B_TRUE;

	if ((rc = ddi_intr_enable(umdevice->pIntrBlock[0])) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to enable default isr block (%d)",
		    umdevice->dev_name, rc);
		return; /* XXX return error */
	}

	/* Allow the hardware to generate interrupts. */
	lm_enable_int(&(umdevice->lm_dev));

	FLUSHPOSTEDWRITES(&(umdevice->lm_dev));

	/*
	 * XXX This delay is here because of a discovered problem regarding a
	 * call to ddi_intr_disable immediately after enabling interrupts.
	 * This can occur with the "ifconfig -a plumb up" command, which
	 * brings an interface up/down/up/down/up.  There seems to be a race
	 * condition between the ddi_intr_enable/lm_enable_int and
	 * ddi_intr_disable routines that results in interrupts no longer
	 * firing on the interface, and a REBOOT IS REQUIRED to fix it!
	 */
	drv_usecwait(2000000);
}

/*
 * Description:
 *
 * This function makes sure the ISR no longer touches the hardware.  It
 * accomplishes this by making sure the ISR either completes, or that it
 * acknowledges the intr_enabled status change.
 *
 * Return:
 */
static void
bnx_intr_wait(um_device_t * const umdevice)
{
	if (mutex_tryenter(&umdevice->intr_mutex)) {
		/*
		 * If we were able to get the hardware interrupt mutex, then it
		 * means that either the ISR wasn't processing at this time, or
		 * that it was at the end, processing the receive packets.  In
		 * the latter case, all we need to do is acquire the rcv_mutex.
		 * If we can acquire it, it means the receive processing is
		 * stalled, waiting for a GLD mutex, or that the ISR is not
		 * processing RX packets.
		 */
		mutex_enter(&umdevice->os_param.rcv_mutex);
		mutex_exit(&umdevice->os_param.rcv_mutex);
	} else {
		/*
		 * We couldn't acquire the hardware interrupt mutex.  This
		 * means the ISR is running.  Wait for it to complete by
		 * (re)attempting to acquire the interrupt mutex.  Whether we
		 * acquire it immediately or not, we will know the ISR has
		 * acknowledged the intr_enabled status change.
		 */
		mutex_enter(&umdevice->intr_mutex);
	}
	mutex_exit(&umdevice->intr_mutex);
}


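/*
 * Description:
 *
 * This function quiesces interrupt processing.  It clears intr_enabled,
 * waits for any in-flight ISR via bnx_intr_wait(), stops the hardware from
 * generating interrupts, and finally disables the interrupt handle.
 *
 * Return:
 */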
void
bnx_intr_disable(um_device_t * const umdevice)
{
	int rc;

	/*
	 * Prevent any future interrupts from touching the hardware.
	 */
	umdevice->intr_enabled = B_FALSE;

	/*
	 * Wait for any currently running interrupt to complete.
	 */
	bnx_intr_wait(umdevice);

	/* Stop the device from generating any interrupts. */
	lm_disable_int(&(umdevice->lm_dev));

	FLUSHPOSTEDWRITES(&(umdevice->lm_dev));

	if ((rc = ddi_intr_disable(umdevice->pIntrBlock[0])) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to disable default isr (%d)",
		    umdevice->dev_name, rc);
	}
}

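/*
 * Description:
 *
 * This function allocates the driver's single interrupt handle, preferring
 * MSI-X unless disableMsix is set and falling back to a fixed interrupt if
 * the MSI-X allocation fails.  It then validates the interrupt priority,
 * registers bnx_intr_1lvl() as the handler, and initializes the interrupt
 * mutex at that priority.
 *
 * Return:  0 on success, -1 on failure
 */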
int
bnxIntrInit(um_device_t *umdevice)
{
	dev_info_t *pDev = umdevice->os_param.dip;
	int intrActual, rc;

	if ((umdevice->pIntrBlock = kmem_zalloc(sizeof (ddi_intr_handle_t),
	    KM_SLEEP)) == NULL) {
		cmn_err(CE_WARN, "%s: Failed to allocate interrupt handle "
		    "block!", umdevice->dev_name);
		return (-1);
	}

	umdevice->intrType = (umdevice->dev_var.disableMsix) ?
	    DDI_INTR_TYPE_FIXED : DDI_INTR_TYPE_MSIX;

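	/*
	 * Try to allocate a single interrupt of the preferred type.  If the
	 * MSI-X allocation fails, fall back to a fixed interrupt and retry
	 * once before giving up.
	 */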
	while (1) {
		if ((rc = ddi_intr_alloc(pDev, umdevice->pIntrBlock,
		    umdevice->intrType, 0, 1, &intrActual,
		    DDI_INTR_ALLOC_NORMAL)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "!%s: Failed to initialize default "
			    "%s isr handle block (%d)", umdevice->dev_name,
			    (umdevice->intrType == DDI_INTR_TYPE_MSIX) ?
			    "MSIX" : "Fixed", rc);

			if (umdevice->intrType == DDI_INTR_TYPE_MSIX) {
				cmn_err(CE_WARN, "!%s: Reverting to Fixed "
				    "level interrupts", umdevice->dev_name);

				umdevice->intrType = DDI_INTR_TYPE_FIXED;
				continue;
			} else {
				kmem_free(umdevice->pIntrBlock,
				    sizeof (ddi_intr_handle_t));
				return (-1);
			}
		}
		break;
	}

	if (intrActual != 1) {
		cmn_err(CE_WARN, "%s: Failed to alloc minimum default "
		    "isr handler!", umdevice->dev_name);
		(void) ddi_intr_free(umdevice->pIntrBlock[0]);
		kmem_free(umdevice->pIntrBlock, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	if ((rc = ddi_intr_get_pri(umdevice->pIntrBlock[0],
	    &umdevice->intrPriority)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to get isr priority (%d)",
		    umdevice->dev_name, rc);
		(void) ddi_intr_free(umdevice->pIntrBlock[0]);
		kmem_free(umdevice->pIntrBlock, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	if (umdevice->intrPriority >= ddi_intr_get_hilevel_pri()) {
		cmn_err(CE_WARN, "%s: Interrupt priority is too high",
		    umdevice->dev_name);
		(void) ddi_intr_free(umdevice->pIntrBlock[0]);
		kmem_free(umdevice->pIntrBlock, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	if ((rc = ddi_intr_add_handler(umdevice->pIntrBlock[0], bnx_intr_1lvl,
	    (caddr_t)umdevice, NULL)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to add the default isr "
		    "handler (%d)", umdevice->dev_name, rc);
		(void) ddi_intr_free(umdevice->pIntrBlock[0]);
		kmem_free(umdevice->pIntrBlock, sizeof (ddi_intr_handle_t));
		return (-1);
	}

	/* Initialize the mutex used by the hardware interrupt handler. */
	mutex_init(&umdevice->intr_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(umdevice->intrPriority));

	umdevice->lm_dev.vars.interrupt_mode =
	    (umdevice->intrType == DDI_INTR_TYPE_FIXED) ?
	    IRQ_MODE_LINE_BASED : IRQ_MODE_MSIX_BASED;

	return (0);
}

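/*
 * Description:
 *
 * This function undoes bnxIntrInit(): it disables the interrupt handle,
 * removes the handler, frees the handle, releases the handle block, and
 * destroys the interrupt mutex.  Failures are reported but are not fatal.
 *
 * Return:
 */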
void
bnxIntrFini(um_device_t *umdevice)
{
	int ret;

	if ((ret = ddi_intr_disable(umdevice->pIntrBlock[0])) != 0) {
		dev_err(umdevice->os_param.dip, CE_WARN,
		    "failed to disable interrupt: %d", ret);
	}
	if ((ret = ddi_intr_remove_handler(umdevice->pIntrBlock[0])) != 0) {
		dev_err(umdevice->os_param.dip, CE_WARN,
		    "failed to remove interrupt: %d", ret);
	}
	if ((ret = ddi_intr_free(umdevice->pIntrBlock[0])) != 0) {
		dev_err(umdevice->os_param.dip, CE_WARN,
		    "failed to free interrupt: %d", ret);
	}
	kmem_free(umdevice->pIntrBlock, sizeof (ddi_intr_handle_t));

	umdevice->pIntrBlock = NULL;

	mutex_destroy(&umdevice->intr_mutex);
}