xref: /illumos-gate/usr/src/uts/common/io/sfxge/sfxge_ev.c (revision 49ef7e06)
1 /*
2  * Copyright (c) 2008-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright notice,
11  *    this list of conditions and the following disclaimer in the documentation
12  *    and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are
27  * those of the authors and should not be interpreted as representing official
28  * policies, either expressed or implied, of the FreeBSD Project.
29  */
30 
31 #include <sys/types.h>
32 #include <sys/sysmacros.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/stream.h>
36 #include <sys/strsun.h>
37 #include <sys/strsubr.h>
38 #include <sys/cpu.h>
39 #include <sys/pghw.h>
40 
41 #include "sfxge.h"
42 
43 #include "efx.h"
44 
45 
46 /* Timeout to wait for DRIVER_EV_START event at EVQ startup */
47 #define	SFXGE_EV_QSTART_TIMEOUT_USEC	(2000000)
48 
49 
50 /* Event queue DMA attributes */
51 static ddi_device_acc_attr_t sfxge_evq_devacc = {
52 
53 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
54 	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
55 	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
56 };
57 
58 static ddi_dma_attr_t sfxge_evq_dma_attr = {
59 	DMA_ATTR_V0,		/* dma_attr_version	*/
60 	0,			/* dma_attr_addr_lo	*/
61 	0xffffffffffffffffull,	/* dma_attr_addr_hi	*/
62 	0xffffffffffffffffull,	/* dma_attr_count_max	*/
63 	EFX_BUF_SIZE,		/* dma_attr_align	*/
64 	0xffffffff,		/* dma_attr_burstsizes	*/
65 	1,			/* dma_attr_minxfer	*/
66 	0xffffffffffffffffull,	/* dma_attr_maxxfer	*/
67 	0xffffffffffffffffull,	/* dma_attr_seg		*/
68 	1,			/* dma_attr_sgllen	*/
69 	1,			/* dma_attr_granular	*/
70 	0			/* dma_attr_flags	*/
71 };
72 
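/*
 * Common EVQ constructor: allocate a DMA-coherent ring for the event queue
 * (a single cookie, aligned to EFX_BUF_SIZE per the attributes above) and
 * reserve buffer table entries for it.  Called from the per-size kmem cache
 * constructors below.
 */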
73 static int
74 _sfxge_ev_qctor(sfxge_t *sp, sfxge_evq_t *sep, int kmflags, uint16_t evq_size)
75 {
76 	efsys_mem_t *esmp = &(sep->se_mem);
77 	sfxge_dma_buffer_attr_t dma_attr;
78 	int rc;
79 
80 	/* Compile-time structure layout checks */
81 	EFX_STATIC_ASSERT(sizeof (sep->__se_u1.__se_s1) <=
82 	    sizeof (sep->__se_u1.__se_pad));
83 	EFX_STATIC_ASSERT(sizeof (sep->__se_u2.__se_s2) <=
84 	    sizeof (sep->__se_u2.__se_pad));
85 	EFX_STATIC_ASSERT(sizeof (sep->__se_u3.__se_s3) <=
86 	    sizeof (sep->__se_u3.__se_pad));
87 
88 	bzero(sep, sizeof (sfxge_evq_t));
89 
90 	sep->se_sp = sp;
91 
92 	dma_attr.sdba_dip	 = sp->s_dip;
93 	dma_attr.sdba_dattrp	 = &sfxge_evq_dma_attr;
94 	dma_attr.sdba_callback	 = (kmflags == KM_SLEEP) ?
95 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
96 	dma_attr.sdba_length	 = EFX_EVQ_SIZE(evq_size);
97 	dma_attr.sdba_memflags	 = DDI_DMA_CONSISTENT;
98 	dma_attr.sdba_devaccp	 = &sfxge_evq_devacc;
99 	dma_attr.sdba_bindflags	 = DDI_DMA_READ | DDI_DMA_CONSISTENT;
100 	dma_attr.sdba_maxcookies = 1;
101 	dma_attr.sdba_zeroinit	 = B_FALSE;
102 
103 	if ((rc = sfxge_dma_buffer_create(esmp, &dma_attr)) != 0)
104 		goto fail1;
105 
106 	/* Allocate some buffer table entries */
107 	if ((rc = sfxge_sram_buf_tbl_alloc(sp, EFX_EVQ_NBUFS(evq_size),
108 	    &(sep->se_id))) != 0)
109 		goto fail2;
110 
111 	sep->se_stpp = &(sep->se_stp);
112 
113 	return (0);
114 
115 fail2:
116 	DTRACE_PROBE(fail2);
117 
118 	/* Tear down DMA setup */
119 	esmp->esm_addr = 0;
120 	sfxge_dma_buffer_destroy(esmp);
121 
122 fail1:
123 	DTRACE_PROBE1(fail1, int, rc);
124 
125 	sep->se_sp = NULL;
126 
127 	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);
128 
129 	return (-1);
130 }
131 
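/*
 * The two cache constructors below differ only in the ring size they pass
 * down: EVQ 0 also carries the extra default TXQs, so it is sized larger
 * than the other EVQs (see the sizing in sfxge_ev_init()).
 */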
132 static int
133 sfxge_ev_q0ctor(void *buf, void *arg, int kmflags)
134 {
135 	sfxge_evq_t *sep = buf;
136 	sfxge_t *sp = arg;
137 	return (_sfxge_ev_qctor(sp, sep, kmflags, sp->s_evq0_size));
138 }
139 
140 static int
141 sfxge_ev_qXctor(void *buf, void *arg, int kmflags)
142 {
143 	sfxge_evq_t *sep = buf;
144 	sfxge_t *sp = arg;
145 	return (_sfxge_ev_qctor(sp, sep, kmflags, sp->s_evqX_size));
146 }
147 static void
148 _sfxge_ev_qdtor(sfxge_t *sp, sfxge_evq_t *sep, uint16_t evq_size)
149 {
150 	efsys_mem_t *esmp = &(sep->se_mem);
151 	ASSERT3P(sep->se_sp, ==, sp);
152 	ASSERT3P(sep->se_stpp, ==, &(sep->se_stp));
153 	sep->se_stpp = NULL;
154 
155 	/* Free the buffer table entries */
156 	sfxge_sram_buf_tbl_free(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
157 	sep->se_id = 0;
158 
159 	/* Tear down DMA setup */
160 	sfxge_dma_buffer_destroy(esmp);
161 
162 	sep->se_sp = NULL;
163 
164 	SFXGE_OBJ_CHECK(sep, sfxge_evq_t);
165 }
166 
167 static void
168 sfxge_ev_q0dtor(void *buf, void *arg)
169 {
170 	sfxge_evq_t *sep = buf;
171 	sfxge_t *sp = arg;
172 	_sfxge_ev_qdtor(sp, sep, sp->s_evq0_size);
173 }
174 
175 static void
176 sfxge_ev_qXdtor(void *buf, void *arg)
177 {
178 	sfxge_evq_t *sep = buf;
179 	sfxge_t *sp = arg;
180 	_sfxge_ev_qdtor(sp, sep, sp->s_evqX_size);
181 }
182 
183 static boolean_t
184 sfxge_ev_initialized(void *arg)
185 {
186 	sfxge_evq_t *sep = arg;
187 
188 	ASSERT(mutex_owned(&(sep->se_lock)));
189 
190 	/* Init done events may be duplicated on 7xxx (see SFCbug31631) */
191 	if (sep->se_state == SFXGE_EVQ_STARTED)
192 		goto done;
193 
194 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTING);
195 	sep->se_state = SFXGE_EVQ_STARTED;
196 
197 	cv_broadcast(&(sep->se_init_kv));
198 
199 done:
200 	return (B_FALSE);
201 }
202 
203 static void
204 sfxge_ev_qcomplete(sfxge_evq_t *sep, boolean_t eop)
205 {
206 	sfxge_t *sp = sep->se_sp;
207 	unsigned int index = sep->se_index;
208 	sfxge_rxq_t *srp = sp->s_srp[index];
209 	sfxge_txq_t *stp;
210 
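	/*
	 * Walk the list of TXQs that sfxge_ev_tx() chained onto this EVQ and
	 * complete their pending descriptors, then complete any pending RX
	 * descriptors on the paired RXQ.
	 */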
211 	if ((stp = sep->se_stp) != NULL) {
212 		sep->se_stp = NULL;
213 		sep->se_stpp = &(sep->se_stp);
214 
215 		do {
216 			sfxge_txq_t *next;
217 
218 			next = stp->st_next;
219 			stp->st_next = NULL;
220 
221 			ASSERT3U(stp->st_evq, ==, index);
222 
223 			if (stp->st_pending != stp->st_completed)
224 				sfxge_tx_qcomplete(stp);
225 
226 			stp = next;
227 		} while (stp != NULL);
228 	}
229 
230 	if (srp != NULL) {
231 		if (srp->sr_pending != srp->sr_completed)
232 			sfxge_rx_qcomplete(srp, eop);
233 	}
234 }
235 
236 static boolean_t
237 sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
238     uint16_t flags)
239 {
240 	sfxge_evq_t *sep = arg;
241 	sfxge_t *sp = sep->se_sp;
242 	sfxge_rxq_t *srp;
243 	sfxge_rx_packet_t *srpp;
244 	unsigned int prefetch;
245 	unsigned int stop;
246 	unsigned int delta;
247 
248 	ASSERT(mutex_owned(&(sep->se_lock)));
249 
250 	if (sep->se_exception)
251 		goto done;
252 
253 	srp = sp->s_srp[label];
254 	if (srp == NULL)
255 		goto done;
256 
257 	ASSERT3U(sep->se_index, ==, srp->sr_index);
258 	ASSERT3U(id, <, sp->s_rxq_size);
259 
260 	/*
261 	 * Note that in sfxge_stop() the EVQ is stopped after the RXQ and will be
262 	 * reset, so returning without the srp->sr_pending increase is safe.
263 	 */
264 	if (srp->sr_state != SFXGE_RXQ_STARTED)
265 		goto done;
266 
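	/*
	 * The RXQ ring size is a power of two, so masking with (size - 1)
	 * converts the absolute counters into ring indices.  "delta" is the
	 * number of descriptors completed by this event, allowing for ring
	 * wrap-around; e.g. with a 512-entry ring, id == 510 and stop == 2
	 * give delta == 4.
	 */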
267 	stop = (id + 1) & (sp->s_rxq_size - 1);
268 	id = srp->sr_pending & (sp->s_rxq_size - 1);
269 
270 	delta = (stop >= id) ? (stop - id) : (sp->s_rxq_size - id + stop);
271 	srp->sr_pending += delta;
272 
273 	if (delta != 1) {
274 		if ((!efx_nic_cfg_get(sp->s_enp)->enc_rx_batching_enabled) ||
275 		    (delta == 0) ||
276 		    (delta > efx_nic_cfg_get(sp->s_enp)->enc_rx_batch_max)) {
277 			/*
278 			 * FIXME: This does not take into account scatter
279 			 * aborts.  See Bug40811
280 			 */
281 			sep->se_exception = B_TRUE;
282 
283 			DTRACE_PROBE(restart_ev_rx_id);
284 			/* sfxge_evq_t->se_lock held */
285 			(void) sfxge_restart_dispatch(sp, DDI_SLEEP,
286 			    SFXGE_HW_ERR, "Out of order RX event", delta);
287 
288 			goto done;
289 		}
290 	}
291 
292 	prefetch = (id + 4) & (sp->s_rxq_size - 1);
293 	if ((srpp = srp->sr_srpp[prefetch]) != NULL)
294 		prefetch_read_many(srpp);
295 
296 	srpp = srp->sr_srpp[id];
297 	ASSERT(srpp != NULL);
298 	prefetch_read_many(srpp->srp_mp);
299 
300 	for (; id != stop; id = (id + 1) & (sp->s_rxq_size - 1)) {
301 		srpp = srp->sr_srpp[id];
302 		ASSERT(srpp != NULL);
303 
304 		ASSERT3U(srpp->srp_flags, ==, EFX_DISCARD);
305 		srpp->srp_flags = flags;
306 
307 		ASSERT3U(size, <, (1 << 16));
308 		srpp->srp_size = (uint16_t)size;
309 	}
310 
311 	sep->se_rx++;
312 
313 	DTRACE_PROBE2(qlevel, unsigned int, srp->sr_index,
314 	    unsigned int, srp->sr_added - srp->sr_pending);
315 
316 	if (srp->sr_pending - srp->sr_completed >= SFXGE_RX_BATCH)
317 		sfxge_ev_qcomplete(sep, B_FALSE);
318 
319 done:
320 	/* returning B_TRUE makes efx_ev_qpoll() stop processing events */
321 	return (sep->se_rx >= sep->se_ev_batch);
322 }
323 
324 static boolean_t
325 sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
326 {
327 	sfxge_evq_t *sep = arg;
328 	sfxge_t *sp = sep->se_sp;
329 
330 	_NOTE(ARGUNUSED(code))
331 	_NOTE(ARGUNUSED(data))
332 
333 	ASSERT(mutex_owned(&(sep->se_lock)));
334 	sep->se_exception = B_TRUE;
335 
336 	if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
337 
338 		DTRACE_PROBE(restart_ev_exception);
339 
340 		/* sfxge_evq_t->se_lock held */
341 		(void) sfxge_restart_dispatch(sp, DDI_SLEEP, SFXGE_HW_ERR,
342 		    "Unknown EV", code);
343 	}
344 
345 	return (B_FALSE);
346 }
347 
348 static boolean_t
349 sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index)
350 {
351 	sfxge_evq_t *sep_targetq, *sep = arg;
352 	sfxge_t *sp = sep->se_sp;
353 	sfxge_rxq_t *srp;
354 	unsigned int index;
355 	unsigned int label;
356 	uint16_t magic;
357 
358 	ASSERT(mutex_owned(&(sep->se_lock)));
359 
360 	/* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
361 	srp = sp->s_srp[rxq_index];
362 	if (srp == NULL)
363 		goto done;
364 
365 	/* Process right now if it is the correct event queue */
366 	index = srp->sr_index;
367 	if (index == sep->se_index) {
368 		sfxge_rx_qflush_done(srp);
369 		goto done;
370 	}
371 
372 	/* Resend a software event on the correct queue */
373 	sep_targetq = sp->s_sep[index];
374 
375 	if (sep_targetq->se_state != SFXGE_EVQ_STARTED)
376 		goto done; /* TBD: state test not under the lock */
377 
378 	label = rxq_index;
379 	ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
380 	magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;
381 
382 	efx_ev_qpost(sep_targetq->se_eep, magic);
383 
384 done:
385 	return (B_FALSE);
386 }
387 
388 static boolean_t
389 sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
390 {
391 	sfxge_evq_t *sep_targetq, *sep = arg;
392 	sfxge_t *sp = sep->se_sp;
393 	sfxge_rxq_t *srp;
394 	unsigned int index;
395 	unsigned int label;
396 	uint16_t magic;
397 
398 	ASSERT(mutex_owned(&(sep->se_lock)));
399 
400 	/* Ensure RXQ exists, as events may arrive after RXQ was destroyed */
401 	srp = sp->s_srp[rxq_index];
402 	if (srp == NULL)
403 		goto done;
404 
405 	/* Process right now if it is the correct event queue */
406 	index = srp->sr_index;
407 	if (index == sep->se_index) {
408 		sfxge_rx_qflush_failed(srp);
409 		goto done;
410 	}
411 
412 	/* Resend a software event on the correct queue */
413 	sep_targetq = sp->s_sep[index];
414 
415 	label = rxq_index;
416 	ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
417 	magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
418 
419 	if (sep_targetq->se_state != SFXGE_EVQ_STARTED)
420 		goto done; /* TBD: state test not under the lock */
421 
422 	efx_ev_qpost(sep_targetq->se_eep, magic);
423 
424 done:
425 	return (B_FALSE);
426 }
427 
428 static boolean_t
429 sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
430 {
431 	sfxge_evq_t *sep = arg;
432 	sfxge_txq_t *stp;
433 	unsigned int stop;
434 	unsigned int delta;
435 
436 	ASSERT(mutex_owned(&(sep->se_lock)));
437 
438 	stp = sep->se_label_stp[label];
439 	if (stp == NULL)
440 		goto done;
441 
442 	if (stp->st_state != SFXGE_TXQ_STARTED)
443 		goto done;
444 
445 	ASSERT3U(sep->se_index, ==, stp->st_evq);
446 
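	/*
	 * As in sfxge_ev_rx(), the TX ring size is a power of two: mask the
	 * counters to get ring indices and compute how many descriptors this
	 * event completes, allowing for wrap-around.
	 */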
447 	stop = (id + 1) & (SFXGE_TX_NDESCS - 1);
448 	id = stp->st_pending & (SFXGE_TX_NDESCS - 1);
449 
450 	delta = (stop >= id) ? (stop - id) : (SFXGE_TX_NDESCS - id + stop);
451 	stp->st_pending += delta;
452 
453 	sep->se_tx++;
454 
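	/*
	 * Chain this TXQ onto the EVQ's pending-completion list (se_stpp
	 * points at the list tail); sfxge_ev_qcomplete() will walk the list
	 * once event processing finishes.
	 */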
455 	if (stp->st_next == NULL &&
456 	    sep->se_stpp != &(stp->st_next)) {
457 		*(sep->se_stpp) = stp;
458 		sep->se_stpp = &(stp->st_next);
459 	}
460 
461 	DTRACE_PROBE2(qlevel, unsigned int, stp->st_index,
462 	    unsigned int, stp->st_added - stp->st_pending);
463 
464 	if (stp->st_pending - stp->st_completed >= SFXGE_TX_BATCH)
465 		sfxge_tx_qcomplete(stp);
466 
467 done:
468 	/* returning B_TRUE makes efx_ev_qpoll() stop processing events */
469 	return (sep->se_tx >= sep->se_ev_batch);
470 }
471 
472 static boolean_t
473 sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
474 {
475 	sfxge_evq_t *sep = arg;
476 	sfxge_t *sp = sep->se_sp;
477 	sfxge_txq_t *stp;
478 	unsigned int evq;
479 	unsigned int label;
480 	uint16_t magic;
481 
482 	ASSERT(mutex_owned(&(sep->se_lock)));
483 
484 	/* Ensure TXQ exists, as events may arrive after TXQ was destroyed */
485 	stp = sp->s_stp[txq_index];
486 	if (stp == NULL)
487 		goto done;
488 
489 	/* Process right now if it is the correct event queue */
490 	evq = stp->st_evq;
491 	if (evq == sep->se_index) {
492 		sfxge_tx_qflush_done(stp);
493 		goto done;
494 	}
495 
496 	/* Resend a software event on the correct queue */
497 	sep = sp->s_sep[evq];
498 
499 	label = stp->st_label;
500 
501 	ASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label);
502 	magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
503 
504 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
505 	efx_ev_qpost(sep->se_eep, magic);
506 
507 done:
508 	return (B_FALSE);
509 }
510 
511 static boolean_t
512 sfxge_ev_software(void *arg, uint16_t magic)
513 {
514 	sfxge_evq_t *sep = arg;
515 	sfxge_t *sp = sep->se_sp;
516 	dev_info_t *dip = sp->s_dip;
517 	unsigned int label;
518 
519 	ASSERT(mutex_owned(&(sep->se_lock)));
520 
521 	EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH ==
522 	    FSF_AZ_RX_EV_Q_LABEL_WIDTH);
523 	EFX_STATIC_ASSERT(SFXGE_MAGIC_DMAQ_LABEL_WIDTH ==
524 	    FSF_AZ_TX_EV_Q_LABEL_WIDTH);
525 
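	/*
	 * Driver-generated events encode the DMA queue label in the low bits
	 * and the event type in the remaining bits; they are posted with
	 * efx_ev_qpost() by the flush handlers above to redirect completions
	 * to the EVQ that owns the queue.
	 */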
526 	label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
527 	magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
528 
529 	switch (magic) {
530 	case SFXGE_MAGIC_RX_QFLUSH_DONE: {
531 		sfxge_rxq_t *srp = sp->s_srp[label];
532 
533 		if (srp != NULL) {
534 			ASSERT3U(sep->se_index, ==, srp->sr_index);
535 
536 			sfxge_rx_qflush_done(srp);
537 		}
538 		break;
539 	}
540 	case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
541 		sfxge_rxq_t *srp = sp->s_srp[label];
542 
543 		if (srp != NULL) {
544 			ASSERT3U(sep->se_index, ==, srp->sr_index);
545 
546 			sfxge_rx_qflush_failed(srp);
547 		}
548 		break;
549 	}
550 	case SFXGE_MAGIC_RX_QFPP_TRIM: {
551 		sfxge_rxq_t *srp = sp->s_srp[label];
552 
553 		if (srp != NULL) {
554 			ASSERT3U(sep->se_index, ==, srp->sr_index);
555 
556 			sfxge_rx_qfpp_trim(srp);
557 		}
558 		break;
559 	}
560 	case SFXGE_MAGIC_TX_QFLUSH_DONE: {
561 		sfxge_txq_t *stp = sep->se_label_stp[label];
562 
563 		if (stp != NULL) {
564 			ASSERT3U(sep->se_index, ==, stp->st_evq);
565 
566 			sfxge_tx_qflush_done(stp);
567 		}
568 		break;
569 	}
570 	default:
571 		dev_err(dip, CE_NOTE,
572 		    SFXGE_CMN_ERR "unknown software event 0x%x", magic);
573 		break;
574 	}
575 
576 	return (B_FALSE);
577 }
578 
579 static boolean_t
580 sfxge_ev_sram(void *arg, uint32_t code)
581 {
582 	_NOTE(ARGUNUSED(arg))
583 
584 	switch (code) {
585 	case EFX_SRAM_UPDATE:
586 		DTRACE_PROBE(sram_update);
587 		break;
588 
589 	case EFX_SRAM_CLEAR:
590 		DTRACE_PROBE(sram_clear);
591 		break;
592 
593 	case EFX_SRAM_ILLEGAL_CLEAR:
594 		DTRACE_PROBE(sram_illegal_clear);
595 		break;
596 
597 	default:
598 		ASSERT(B_FALSE);
599 		break;
600 	}
601 
602 	return (B_FALSE);
603 }
604 
605 static boolean_t
606 sfxge_ev_timer(void *arg, uint32_t index)
607 {
608 	_NOTE(ARGUNUSED(arg, index))
609 
610 	return (B_FALSE);
611 }
612 
613 static boolean_t
614 sfxge_ev_wake_up(void *arg, uint32_t index)
615 {
616 	_NOTE(ARGUNUSED(arg, index))
617 
618 	return (B_FALSE);
619 }
620 
621 static boolean_t
622 sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
623 {
624 	sfxge_evq_t *sep = arg;
625 	sfxge_t *sp = sep->se_sp;
626 
627 	sfxge_mac_link_update(sp, link_mode);
628 
629 	return (B_FALSE);
630 }
631 
632 static int
633 sfxge_ev_kstat_update(kstat_t *ksp, int rw)
634 {
635 	sfxge_evq_t *sep = ksp->ks_private;
636 	kstat_named_t *knp;
637 	int rc;
638 
639 	if (rw != KSTAT_READ) {
640 		rc = EACCES;
641 		goto fail1;
642 	}
643 
644 	ASSERT(mutex_owned(&(sep->se_lock)));
645 
646 	if (sep->se_state != SFXGE_EVQ_STARTED)
647 		goto done;
648 
649 	efx_ev_qstats_update(sep->se_eep, sep->se_stat);
650 
651 	knp = ksp->ks_data;
652 	knp += EV_NQSTATS;
653 
654 	knp->value.ui64 = sep->se_cpu_id;
655 
656 done:
657 	return (0);
658 
659 fail1:
660 	DTRACE_PROBE1(fail1, int, rc);
661 
662 	return (rc);
663 }
664 
665 static int
666 sfxge_ev_kstat_init(sfxge_evq_t *sep)
667 {
668 	sfxge_t *sp = sep->se_sp;
669 	unsigned int index = sep->se_index;
670 	dev_info_t *dip = sp->s_dip;
671 	kstat_t *ksp;
672 	kstat_named_t *knp;
673 	char name[MAXNAMELEN];
674 	unsigned int id;
675 	int rc;
676 
677 	/* Determine the name */
678 	(void) snprintf(name, MAXNAMELEN - 1, "%s_evq%04d",
679 	    ddi_driver_name(dip), index);
680 
681 	/* Create the set */
682 	if ((ksp = kstat_create((char *)ddi_driver_name(dip),
683 	    ddi_get_instance(dip), name, "queue", KSTAT_TYPE_NAMED,
684 	    EV_NQSTATS + 1, 0)) == NULL) {
685 		rc = ENOMEM;
686 		goto fail1;
687 	}
688 
689 	sep->se_ksp = ksp;
690 
691 	ksp->ks_update = sfxge_ev_kstat_update;
692 	ksp->ks_private = sep;
693 	ksp->ks_lock = &(sep->se_lock);
694 
695 	/* Initialise the named stats */
696 	sep->se_stat = knp = ksp->ks_data;
697 	for (id = 0; id < EV_NQSTATS; id++) {
698 		kstat_named_init(knp, (char *)efx_ev_qstat_name(sp->s_enp, id),
699 		    KSTAT_DATA_UINT64);
700 		knp++;
701 	}
702 
703 	kstat_named_init(knp, "cpu", KSTAT_DATA_UINT64);
704 
705 	kstat_install(ksp);
706 	return (0);
707 
708 fail1:
709 	DTRACE_PROBE1(fail1, int, rc);
710 
711 	return (rc);
712 }
713 
714 static void
715 sfxge_ev_kstat_fini(sfxge_evq_t *sep)
716 {
717 	/* Destroy the set */
718 	kstat_delete(sep->se_ksp);
719 	sep->se_ksp = NULL;
720 	sep->se_stat = NULL;
721 }
722 
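/*
 * Return the smallest power of two that is >= n (e.g. pow2_ge(1500) == 2048).
 * Used below to round the EVQ sizes up to a valid hardware ring size.
 */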
723 inline unsigned pow2_ge(unsigned int n) {
724 	unsigned int order = 0;
725 	ASSERT3U(n, >, 0);
726 	while ((1ul << order) < n) ++order;
727 	return (1ul << (order));
728 }
729 
730 static int
731 sfxge_ev_qinit(sfxge_t *sp, unsigned int index, unsigned int ev_batch)
732 {
733 	sfxge_evq_t *sep;
734 	int rc;
735 
736 	ASSERT3U(index, <, SFXGE_RX_SCALE_MAX);
737 
738 	sep = kmem_cache_alloc(index ? sp->s_eqXc : sp->s_eq0c, KM_SLEEP);
739 	if (sep == NULL) {
740 		rc = ENOMEM;
741 		goto fail1;
742 	}
743 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_UNINITIALIZED);
744 
745 	sep->se_index = index;
746 
747 	mutex_init(&(sep->se_lock), NULL,
748 	    MUTEX_DRIVER, DDI_INTR_PRI(sp->s_intr.si_intr_pri));
749 
750 	cv_init(&(sep->se_init_kv), NULL, CV_DRIVER, NULL);
751 
752 	/* Initialize the statistics */
753 	if ((rc = sfxge_ev_kstat_init(sep)) != 0)
754 		goto fail2;
755 
756 	sep->se_state = SFXGE_EVQ_INITIALIZED;
757 	sep->se_ev_batch = (uint16_t)ev_batch;
758 	sp->s_sep[index] = sep;
759 
760 	return (0);
761 
762 fail2:
763 	DTRACE_PROBE(fail2);
764 
765 	sep->se_index = 0;
766 
767 	cv_destroy(&(sep->se_init_kv));
768 	mutex_destroy(&(sep->se_lock));
769 
770 	kmem_cache_free(index ? sp->s_eqXc : sp->s_eq0c, sep);
771 
772 fail1:
773 	DTRACE_PROBE1(fail1, int, rc);
774 
775 	return (rc);
776 }
777 
778 static int
779 sfxge_ev_qstart(sfxge_t *sp, unsigned int index)
780 {
781 	sfxge_evq_t *sep = sp->s_sep[index];
782 	sfxge_intr_t *sip = &(sp->s_intr);
783 	efx_nic_t *enp = sp->s_enp;
784 	efx_ev_callbacks_t *eecp;
785 	efsys_mem_t *esmp;
786 	clock_t timeout;
787 	int rc;
788 	uint16_t evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
789 
790 	mutex_enter(&(sep->se_lock));
791 	esmp = &(sep->se_mem);
792 
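	/*
	 * Startup sequence: fill the ring with all-ones (so unwritten entries
	 * read back as "not present" to the poll loop), program the buffer
	 * table, create the EVQ, set the default moderation, prime it for
	 * interrupts and then wait for the initialized event, which
	 * sfxge_ev_initialized() acknowledges by broadcasting se_init_kv.
	 */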
793 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_INITIALIZED);
794 
795 	/* Set the memory to all ones */
796 	(void) memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq_size));
797 
798 	/* Program the buffer table */
799 	if ((rc = sfxge_sram_buf_tbl_set(sp, sep->se_id, esmp,
800 	    EFX_EVQ_NBUFS(evq_size))) != 0)
801 		goto fail1;
802 
803 	/* Set up the event callbacks */
804 	eecp = &(sep->se_eec);
805 	eecp->eec_initialized = sfxge_ev_initialized;
806 	eecp->eec_rx = sfxge_ev_rx;
807 	eecp->eec_tx = sfxge_ev_tx;
808 	eecp->eec_exception = sfxge_ev_exception;
809 	eecp->eec_rxq_flush_done = sfxge_ev_rxq_flush_done;
810 	eecp->eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed;
811 	eecp->eec_txq_flush_done = sfxge_ev_txq_flush_done;
812 	eecp->eec_software = sfxge_ev_software;
813 	eecp->eec_sram = sfxge_ev_sram;
814 	eecp->eec_wake_up = sfxge_ev_wake_up;
815 	eecp->eec_timer = sfxge_ev_timer;
816 	eecp->eec_link_change = sfxge_ev_link_change;
817 
818 	/* Create the event queue */
819 	if ((rc = efx_ev_qcreate(enp, index, esmp, evq_size, sep->se_id,
820 	    &(sep->se_eep))) != 0)
821 		goto fail2;
822 
823 	/* Set the default moderation */
824 	if ((rc = efx_ev_qmoderate(sep->se_eep, sp->s_ev_moderation)) != 0)
825 		goto fail3;
826 
827 	/* Check that interrupts are enabled at the NIC */
828 	if (sip->si_state != SFXGE_INTR_STARTED) {
829 		rc = EINVAL;
830 		goto fail4;
831 	}
832 
833 	sep->se_state = SFXGE_EVQ_STARTING;
834 
835 	/* Prime the event queue for interrupts */
836 	if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
837 		goto fail5;
838 
839 	/* Wait for the initialization event */
840 	timeout = ddi_get_lbolt() + drv_usectohz(SFXGE_EV_QSTART_TIMEOUT_USEC);
841 	while (sep->se_state != SFXGE_EVQ_STARTED) {
842 		if (cv_timedwait(&(sep->se_init_kv), &(sep->se_lock),
843 		    timeout) < 0) {
844 			/* Timeout waiting for initialization */
845 			dev_info_t *dip = sp->s_dip;
846 
847 			DTRACE_PROBE(timeout);
848 			dev_err(dip, CE_NOTE,
849 			    SFXGE_CMN_ERR "evq[%d] qstart timeout", index);
850 
851 			rc = ETIMEDOUT;
852 			goto fail6;
853 		}
854 	}
855 
856 	mutex_exit(&(sep->se_lock));
857 	return (0);
858 
859 fail6:
860 	DTRACE_PROBE(fail6);
861 
862 fail5:
863 	DTRACE_PROBE(fail5);
864 
865 	sep->se_state = SFXGE_EVQ_INITIALIZED;
866 
867 fail4:
868 	DTRACE_PROBE(fail4);
869 
870 fail3:
871 	DTRACE_PROBE(fail3);
872 
873 	/* Destroy the event queue */
874 	efx_ev_qdestroy(sep->se_eep);
875 	sep->se_eep = NULL;
876 
877 fail2:
878 	DTRACE_PROBE(fail2);
879 
880 	/* Zero out the event handlers */
881 	bzero(&(sep->se_eec), sizeof (efx_ev_callbacks_t));
882 
883 	/* Clear entries from the buffer table */
884 	sfxge_sram_buf_tbl_clear(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
885 
886 fail1:
887 	DTRACE_PROBE1(fail1, int, rc);
888 
889 	mutex_exit(&(sep->se_lock));
890 
891 	return (rc);
892 }
893 
894 int
895 sfxge_ev_qpoll(sfxge_t *sp, unsigned int index)
896 {
897 	sfxge_evq_t *sep = sp->s_sep[index];
898 	processorid_t cpu_id;
899 	int rc;
900 	uint16_t evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
901 
902 	mutex_enter(&(sep->se_lock));
903 
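	/*
	 * Interrupt handling path: sync the ring for the kernel, let
	 * efx_ev_qpoll() dispatch events through the callback table set up in
	 * sfxge_ev_qstart(), complete any deferred RX/TX work and then
	 * re-prime the queue so that the next event raises an interrupt.
	 */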
904 	if (sep->se_state != SFXGE_EVQ_STARTING &&
905 	    sep->se_state != SFXGE_EVQ_STARTED) {
906 		rc = EINVAL;
907 		goto fail1;
908 	}
909 
910 	/* Make sure the CPU information is up to date */
911 	cpu_id = CPU->cpu_id;
912 
913 	if (cpu_id != sep->se_cpu_id) {
914 		sep->se_cpu_id = cpu_id;
915 
916 		/* sfxge_evq_t->se_lock held */
917 		(void) ddi_taskq_dispatch(sp->s_tqp, sfxge_rx_scale_update, sp,
918 		    DDI_NOSLEEP);
919 	}
920 
921 	/* Synchronize the DMA memory for reading */
922 	(void) ddi_dma_sync(sep->se_mem.esm_dma_handle,
923 	    0,
924 	    EFX_EVQ_SIZE(evq_size),
925 	    DDI_DMA_SYNC_FORKERNEL);
926 
927 	ASSERT3U(sep->se_rx, ==, 0);
928 	ASSERT3U(sep->se_tx, ==, 0);
929 	ASSERT3P(sep->se_stp, ==, NULL);
930 	ASSERT3P(sep->se_stpp, ==, &(sep->se_stp));
931 
932 	/* Poll the queue */
933 	efx_ev_qpoll(sep->se_eep, &(sep->se_count), &(sep->se_eec),
934 	    sep);
935 
936 	sep->se_rx = 0;
937 	sep->se_tx = 0;
938 
939 	/* Perform any pending completion processing */
940 	sfxge_ev_qcomplete(sep, B_TRUE);
941 
942 	/* Re-prime the event queue for interrupts */
943 	if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
944 		goto fail2;
945 
946 	mutex_exit(&(sep->se_lock));
947 
948 	return (0);
949 
950 fail2:
951 	DTRACE_PROBE(fail2);
952 fail1:
953 	DTRACE_PROBE1(fail1, int, rc);
954 
955 	mutex_exit(&(sep->se_lock));
956 
957 	return (rc);
958 }
959 
960 int
961 sfxge_ev_qprime(sfxge_t *sp, unsigned int index)
962 {
963 	sfxge_evq_t *sep = sp->s_sep[index];
964 	int rc;
965 
966 	mutex_enter(&(sep->se_lock));
967 
968 	if (sep->se_state != SFXGE_EVQ_STARTING &&
969 	    sep->se_state != SFXGE_EVQ_STARTED) {
970 		rc = EINVAL;
971 		goto fail1;
972 	}
973 
974 	if ((rc = efx_ev_qprime(sep->se_eep, sep->se_count)) != 0)
975 		goto fail2;
976 
977 	mutex_exit(&(sep->se_lock));
978 
979 	return (0);
980 
981 fail2:
982 	DTRACE_PROBE(fail2);
983 fail1:
984 	DTRACE_PROBE1(fail1, int, rc);
985 
986 	mutex_exit(&(sep->se_lock));
987 
988 	return (rc);
989 }
990 
991 
992 int
993 sfxge_ev_qmoderate(sfxge_t *sp, unsigned int index, unsigned int us)
994 {
995 	sfxge_evq_t *sep = sp->s_sep[index];
996 	efx_evq_t *eep = sep->se_eep;
997 
998 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
999 
1000 	return (efx_ev_qmoderate(eep, us));
1001 }
1002 
1003 static void
1004 sfxge_ev_qstop(sfxge_t *sp, unsigned int index)
1005 {
1006 	sfxge_evq_t *sep = sp->s_sep[index];
1007 	uint16_t evq_size;
1008 
1009 	mutex_enter(&(sep->se_lock));
1010 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_STARTED);
1011 	sep->se_state = SFXGE_EVQ_INITIALIZED;
1012 	evq_size = index ? sp->s_evqX_size : sp->s_evq0_size;
1013 
1014 	/* Clear the CPU information */
1015 	sep->se_cpu_id = 0;
1016 
1017 	/* Clear the event count */
1018 	sep->se_count = 0;
1019 
1020 	/* Reset the exception flag */
1021 	sep->se_exception = B_FALSE;
1022 
1023 	/* Destroy the event queue */
1024 	efx_ev_qdestroy(sep->se_eep);
1025 	sep->se_eep = NULL;
1026 
1027 	mutex_exit(&(sep->se_lock));
1028 
1029 	/* Zero out the event handlers */
1030 	bzero(&(sep->se_eec), sizeof (efx_ev_callbacks_t));
1031 
1032 	/* Clear entries from the buffer table */
1033 	sfxge_sram_buf_tbl_clear(sp, sep->se_id, EFX_EVQ_NBUFS(evq_size));
1034 }
1035 
1036 static void
1037 sfxge_ev_qfini(sfxge_t *sp, unsigned int index)
1038 {
1039 	sfxge_evq_t *sep = sp->s_sep[index];
1040 
1041 	ASSERT3U(sep->se_state, ==, SFXGE_EVQ_INITIALIZED);
1042 
1043 	sp->s_sep[index] = NULL;
1044 	sep->se_state = SFXGE_EVQ_UNINITIALIZED;
1045 
1046 	/* Tear down the statistics */
1047 	sfxge_ev_kstat_fini(sep);
1048 
1049 	cv_destroy(&(sep->se_init_kv));
1050 	mutex_destroy(&(sep->se_lock));
1051 
1052 	sep->se_index = 0;
1053 
1054 	kmem_cache_free(index ? sp->s_eqXc : sp->s_eq0c, sep);
1055 }
1056 
1057 int
1058 sfxge_ev_txlabel_alloc(sfxge_t *sp, unsigned int evq, sfxge_txq_t *stp,
1059     unsigned int *labelp)
1060 {
1061 	sfxge_evq_t *sep = sp->s_sep[evq];
1062 	sfxge_txq_t **stpp;
1063 	unsigned int label;
1064 	int rc;
1065 
1066 	mutex_enter(&(sep->se_lock));
1067 
1068 	if (stp == NULL || labelp == NULL) {
1069 		rc = EINVAL;
1070 		goto fail1;
1071 	}
1072 
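	/* Fail if the TXQ already has a label; otherwise take the first free slot */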
1073 	stpp = NULL;
1074 	for (label = 0; label < SFXGE_TX_NLABELS; label++) {
1075 		if (sep->se_label_stp[label] == stp) {
1076 			rc = EEXIST;
1077 			goto fail2;
1078 		}
1079 		if ((stpp == NULL) && (sep->se_label_stp[label] == NULL)) {
1080 			stpp = &sep->se_label_stp[label];
1081 		}
1082 	}
1083 	if (stpp == NULL) {
1084 		rc = ENOSPC;
1085 		goto fail3;
1086 	}
1087 	*stpp = stp;
1088 	label = stpp - sep->se_label_stp;
1089 
1090 	ASSERT3U(label, <, SFXGE_TX_NLABELS);
1091 	*labelp = label;
1092 
1093 	mutex_exit(&(sep->se_lock));
1094 	return (0);
1095 
1096 fail3:
1097 	DTRACE_PROBE(fail3);
1098 fail2:
1099 	DTRACE_PROBE(fail2);
1100 fail1:
1101 	DTRACE_PROBE1(fail1, int, rc);
1102 
1103 	mutex_exit(&(sep->se_lock));
1104 
1105 	return (rc);
1106 }
1107 
1108 
1109 int
1110 sfxge_ev_txlabel_free(sfxge_t *sp, unsigned int evq, sfxge_txq_t *stp,
1111     unsigned int label)
1112 {
1113 	sfxge_evq_t *sep = sp->s_sep[evq];
1114 	int rc;
1115 
1116 	mutex_enter(&(sep->se_lock));
1117 
1118 	if (stp == NULL || label >= SFXGE_TX_NLABELS) {
1119 		rc = EINVAL;
1120 		goto fail1;
1121 	}
1122 
1123 	if (sep->se_label_stp[label] != stp) {
1124 		rc = EINVAL;
1125 		goto fail2;
1126 	}
1127 	sep->se_label_stp[label] = NULL;
1128 
1129 	mutex_exit(&(sep->se_lock));
1130 
1131 	return (0);
1132 
1133 fail2:
1134 	DTRACE_PROBE(fail2);
1135 fail1:
1136 	DTRACE_PROBE1(fail1, int, rc);
1137 
1138 	mutex_exit(&(sep->se_lock));
1139 
1140 	return (rc);
1141 }
1142 
1143 
1144 static 	kmem_cache_t *
1145 sfxge_ev_kmem_cache_create(sfxge_t *sp, const char *qname,
1146     int (*ctor)(void *, void *, int), void (*dtor)(void *, void *))
1147 {
1148 	char name[MAXNAMELEN];
1149 	kmem_cache_t *eqc;
1150 
1151 	(void) snprintf(name, MAXNAMELEN - 1, "%s%d_%s_cache",
1152 	    ddi_driver_name(sp->s_dip), ddi_get_instance(sp->s_dip), qname);
1153 
1154 	eqc = kmem_cache_create(name, sizeof (sfxge_evq_t),
1155 	    SFXGE_CPU_CACHE_SIZE, ctor, dtor, NULL, sp, NULL, 0);
1156 	ASSERT(eqc != NULL);
1157 	return (eqc);
1158 }
1159 
1160 int
1161 sfxge_ev_init(sfxge_t *sp)
1162 {
1163 	sfxge_intr_t *sip = &(sp->s_intr);
1164 	unsigned int evq0_size;
1165 	unsigned int evqX_size;
1166 	unsigned int ev_batch;
1167 	int index;
1168 	int rc;
1169 
1170 	ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
1171 
1172 	/*
1173 	 * Must account for the RXQ and TXQ(s); MCDI is not event-completed at present.
1174 	 * Note that the common code does not completely fill the descriptor queues.
1175 	 */
1176 	evqX_size = sp->s_rxq_size + SFXGE_TX_NDESCS;
1177 	evq0_size = evqX_size + SFXGE_TX_NDESCS; /* only IP checksum TXQ */
1178 	evq0_size += SFXGE_TX_NDESCS; /* no checksums */
1179 
1180 	ASSERT3U(evqX_size, >=, EFX_EVQ_MINNEVS);
1181 	ASSERT3U(evq0_size, >, evqX_size);
1182 
1183 	if (evq0_size > EFX_EVQ_MAXNEVS) {
1184 		rc = EINVAL;
1185 		goto fail1;
1186 	}
1187 
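	/*
	 * Round both sizes up to a power of two, as required for the hardware
	 * event rings.
	 */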
1188 	sp->s_evq0_size = pow2_ge(evq0_size);
1189 	sp->s_evqX_size = pow2_ge(evqX_size);
1190 
1191 	/* Read driver parameters */
1192 	sp->s_ev_moderation = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
1193 	    DDI_PROP_DONTPASS, "intr_moderation", SFXGE_DEFAULT_MODERATION);
1194 
1195 	ev_batch = ddi_prop_get_int(DDI_DEV_T_ANY, sp->s_dip,
1196 	    DDI_PROP_DONTPASS, "ev_batch", SFXGE_EV_BATCH);
1197 
1198 	/*
1199 	 * It is slightly perverse to have a cache for one item, but it allows
1200 	 * for simple alignment control without increasing the allocation size
1201 	 */
1202 	sp->s_eq0c = sfxge_ev_kmem_cache_create(sp, "evq0", sfxge_ev_q0ctor,
1203 	    sfxge_ev_q0dtor);
1204 	sp->s_eqXc = sfxge_ev_kmem_cache_create(sp, "evqX", sfxge_ev_qXctor,
1205 	    sfxge_ev_qXdtor);
1206 
1207 	/* Initialize the event queue(s) */
1208 	for (index = 0; index < sip->si_nalloc; index++) {
1209 		if ((rc = sfxge_ev_qinit(sp, index, ev_batch)) != 0)
1210 			goto fail2;
1211 	}
1212 
1213 	return (0);
1214 
1215 fail2:
1216 	DTRACE_PROBE(fail2);
1217 
1218 	while (--index >= 0)
1219 		sfxge_ev_qfini(sp, index);
1220 	sp->s_ev_moderation = 0;
1221 
1222 fail1:
1223 	DTRACE_PROBE1(fail1, int, rc);
1224 
1225 	kmem_cache_destroy(sp->s_eqXc);
1226 	kmem_cache_destroy(sp->s_eq0c);
1227 	sp->s_eqXc = NULL;
1228 	sp->s_eq0c = NULL;
1229 
1230 	return (rc);
1231 }
1232 
1233 int
1234 sfxge_ev_start(sfxge_t *sp)
1235 {
1236 	sfxge_intr_t *sip = &(sp->s_intr);
1237 	efx_nic_t *enp = sp->s_enp;
1238 	int index;
1239 	int rc;
1240 
1241 	ASSERT3U(sip->si_state, ==, SFXGE_INTR_STARTED);
1242 
1243 	/* Initialize the event module */
1244 	if ((rc = efx_ev_init(enp)) != 0)
1245 		goto fail1;
1246 
1247 	/* Start the event queues */
1248 	for (index = 0; index < sip->si_nalloc; index++) {
1249 		if ((rc = sfxge_ev_qstart(sp, index)) != 0)
1250 			goto fail2;
1251 	}
1252 
1253 	return (0);
1254 
1255 fail2:
1256 	DTRACE_PROBE(fail2);
1257 
1258 	/* Stop the event queue(s) */
1259 	while (--index >= 0)
1260 		sfxge_ev_qstop(sp, index);
1261 
1262 	/* Tear down the event module */
1263 	efx_ev_fini(enp);
1264 
1265 fail1:
1266 	DTRACE_PROBE1(fail1, int, rc);
1267 
1268 	return (rc);
1269 }
1270 
1271 void
1272 sfxge_ev_moderation_get(sfxge_t *sp, unsigned int *usp)
1273 {
1274 	*usp = sp->s_ev_moderation;
1275 }
1276 
1277 int
1278 sfxge_ev_moderation_set(sfxge_t *sp, unsigned int us)
1279 {
1280 	sfxge_intr_t *sip = &(sp->s_intr);
1281 	int index;
1282 	int rc;
1283 
1284 	if (sip->si_state != SFXGE_INTR_STARTED)
1285 		return (ENODEV);
1286 
1287 	for (index = 0; index < sip->si_nalloc; index++) {
1288 		if ((rc = sfxge_ev_qmoderate(sp, index, us)) != 0)
1289 			goto fail1;
1290 	}
1291 
1292 	sp->s_ev_moderation = us;
1293 	return (0);
1294 
1295 fail1:
1296 	DTRACE_PROBE1(fail1, int, rc);
1297 
1298 	/* The only error path is an invalid moderation value. */
1299 	ASSERT3U(index, ==, 0);
1300 
1301 	return (rc);
1302 }
1303 
1304 void
1305 sfxge_ev_stop(sfxge_t *sp)
1306 {
1307 	sfxge_intr_t *sip = &(sp->s_intr);
1308 	efx_nic_t *enp = sp->s_enp;
1309 	int index;
1310 
1311 	ASSERT3U(sip->si_state, ==, SFXGE_INTR_STARTED);
1312 
1313 	/* Stop the event queue(s) */
1314 	index = sip->si_nalloc;
1315 	while (--index >= 0)
1316 		sfxge_ev_qstop(sp, index);
1317 
1318 	/* Tear down the event module */
1319 	efx_ev_fini(enp);
1320 }
1321 
1322 void
1323 sfxge_ev_fini(sfxge_t *sp)
1324 {
1325 	sfxge_intr_t *sip = &(sp->s_intr);
1326 	int index;
1327 
1328 	ASSERT3U(sip->si_state, ==, SFXGE_INTR_INITIALIZED);
1329 
1330 	sp->s_ev_moderation = 0;
1331 
1332 	/* Tear down the event queue(s) */
1333 	index = sip->si_nalloc;
1334 	while (--index >= 0)
1335 		sfxge_ev_qfini(sp, index);
1336 
1337 	kmem_cache_destroy(sp->s_eqXc);
1338 	kmem_cache_destroy(sp->s_eq0c);
1339 	sp->s_eqXc = NULL;
1340 	sp->s_eq0c = NULL;
1341 }
1342