1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * This file is part of the Chelsio T4 support code.
14 *
15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 * release for licensing terms and conditions.
21 */
22
23 /*
24 * Copyright 2021 Oxide Computer Company
25 */
26
27 #include <sys/ddi.h>
28 #include <sys/sunddi.h>
29 #include <sys/sunndi.h>
30 #include <sys/atomic.h>
31 #include <sys/dlpi.h>
32 #include <sys/pattr.h>
33 #include <sys/strsubr.h>
34 #include <sys/stream.h>
35 #include <sys/strsun.h>
36 #include <inet/ip.h>
37 #include <inet/tcp.h>
38
39 #include "version.h"
40 #include "common/common.h"
41 #include "common/t4_msg.h"
42 #include "common/t4_regs.h"
43 #include "common/t4_regs_values.h"
44
45 /* TODO: Tune. */
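/*
 * rx_buf_size is the size of each free list rx buffer (it is also programmed
 * into A_SGE_FL_BUFFER_SIZE0 below).  tx_copy_threshold sizes the per-txq
 * copy buffer (see alloc_txq()) and governs when a frame is copied into it
 * instead of being DMA-bound; rx_copy_threshold is the analogous cutoff on
 * the rx side (it seeds fl->copy_threshold).
 */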
46 int rx_buf_size = 8192;
47 int tx_copy_threshold = 256;
48 uint16_t rx_copy_threshold = 256;
49
50 /* Used to track a coalesced tx work request */
51 struct txpkts {
52 mblk_t *tail; /* head is in the software descriptor */
53 uint64_t *flitp; /* ptr to flit where next pkt should start */
54 uint8_t npkt; /* # of packets in this work request */
55 uint8_t nflits; /* # of flits used by this work request */
56 uint16_t plen; /* total payload (sum of all packets) */
57 };
58
59 /* All information needed to tx a frame */
60 struct txinfo {
61 uint32_t len; /* Total length of frame */
62 uint32_t flags; /* Checksum and LSO flags */
63 uint32_t mss; /* MSS for LSO */
64 uint8_t nsegs; /* # of segments in the SGL, 0 means imm. tx */
65 uint8_t nflits; /* # of flits needed for the SGL */
66 uint8_t hdls_used; /* # of DMA handles used */
67 uint32_t txb_used; /* txb_space used */
68 struct ulptx_sgl sgl __attribute__((aligned(8)));
69 struct ulptx_sge_pair reserved[TX_SGL_SEGS / 2];
70 };
71
72 static int service_iq(struct sge_iq *iq, int budget);
73 static inline void init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx,
74 int8_t pktc_idx, int qsize, uint8_t esize);
75 static inline void init_fl(struct sge_fl *fl, uint16_t qsize);
76 static inline void init_eq(struct adapter *sc, struct sge_eq *eq,
77 uint16_t eqtype, uint16_t qsize, uint8_t tx_chan, uint16_t iqid);
78 static int alloc_iq_fl(struct port_info *pi, struct sge_iq *iq,
79 struct sge_fl *fl, int intr_idx, int cong);
80 static int free_iq_fl(struct port_info *pi, struct sge_iq *iq,
81 struct sge_fl *fl);
82 static int alloc_fwq(struct adapter *sc);
83 static int free_fwq(struct adapter *sc);
84 #ifdef TCP_OFFLOAD_ENABLE
85 static int alloc_mgmtq(struct adapter *sc);
86 #endif
87 static int alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx,
88 int i);
89 static int free_rxq(struct port_info *pi, struct sge_rxq *rxq);
90 #ifdef TCP_OFFLOAD_ENABLE
91 static int alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
92 int intr_idx);
93 static int free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq);
94 #endif
95 static int ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq);
96 static int eth_eq_alloc(struct adapter *sc, struct port_info *pi,
97 struct sge_eq *eq);
98 #ifdef TCP_OFFLOAD_ENABLE
99 static int ofld_eq_alloc(struct adapter *sc, struct port_info *pi,
100 struct sge_eq *eq);
101 #endif
102 static int alloc_eq(struct adapter *sc, struct port_info *pi,
103 struct sge_eq *eq);
104 static int free_eq(struct adapter *sc, struct sge_eq *eq);
105 #ifdef TCP_OFFLOAD_ENABLE
106 static int alloc_wrq(struct adapter *sc, struct port_info *pi,
107 struct sge_wrq *wrq, int idx);
108 static int free_wrq(struct adapter *sc, struct sge_wrq *wrq);
109 #endif
110 static int alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx);
111 static int free_txq(struct port_info *pi, struct sge_txq *txq);
112 static int alloc_dma_memory(struct adapter *sc, size_t len, int flags,
113 ddi_device_acc_attr_t *acc_attr, ddi_dma_attr_t *dma_attr,
114 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
115 caddr_t *pva);
116 static int free_dma_memory(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl);
117 static int alloc_desc_ring(struct adapter *sc, size_t len, int rw,
118 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
119 caddr_t *pva);
120 static int free_desc_ring(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl);
121 static int alloc_tx_copybuffer(struct adapter *sc, size_t len,
122 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
123 caddr_t *pva);
124 static inline bool is_new_response(const struct sge_iq *iq,
125 struct rsp_ctrl **ctrl);
126 static inline void iq_next(struct sge_iq *iq);
127 static int refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs);
128 static void refill_sfl(void *arg);
129 static void add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl);
130 static void free_fl_bufs(struct sge_fl *fl);
131 static mblk_t *get_fl_payload(struct adapter *sc, struct sge_fl *fl,
132 uint32_t len_newbuf, int *fl_bufs_used);
133 static int get_frame_txinfo(struct sge_txq *txq, mblk_t **fp,
134 struct txinfo *txinfo, int sgl_only);
135 static inline int fits_in_txb(struct sge_txq *txq, int len, int *waste);
136 static inline int copy_into_txb(struct sge_txq *txq, mblk_t *m, int len,
137 struct txinfo *txinfo);
138 static inline void add_seg(struct txinfo *txinfo, uint64_t ba, uint32_t len);
139 static inline int add_mblk(struct sge_txq *txq, struct txinfo *txinfo,
140 mblk_t *m, int len);
141 static void free_txinfo_resources(struct sge_txq *txq, struct txinfo *txinfo);
142 static int add_to_txpkts(struct sge_txq *txq, struct txpkts *txpkts, mblk_t *m,
143 struct txinfo *txinfo);
144 static void write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts);
145 static int write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, mblk_t *m,
146 struct txinfo *txinfo);
147 static inline void write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
148 struct txpkts *txpkts, struct txinfo *txinfo);
149 static inline void copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to,
150 int len);
151 static inline void ring_tx_db(struct adapter *sc, struct sge_eq *eq);
152 static int reclaim_tx_descs(struct sge_txq *txq, int howmany);
153 static void write_txqflush_wr(struct sge_txq *txq);
154 static int t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss,
155 mblk_t *m);
156 static inline void ring_fl_db(struct adapter *sc, struct sge_fl *fl);
157 static kstat_t *setup_port_config_kstats(struct port_info *pi);
158 static kstat_t *setup_port_info_kstats(struct port_info *pi);
159 static kstat_t *setup_rxq_kstats(struct port_info *pi, struct sge_rxq *rxq,
160 int idx);
161 static int update_rxq_kstats(kstat_t *ksp, int rw);
162 static int update_port_info_kstats(kstat_t *ksp, int rw);
163 static kstat_t *setup_txq_kstats(struct port_info *pi, struct sge_txq *txq,
164 int idx);
165 static int update_txq_kstats(kstat_t *ksp, int rw);
166 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
167 mblk_t *);
168 static int handle_fw_rpl(struct sge_iq *iq, const struct rss_header *rss,
169 mblk_t *m);
170
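/*
 * Number of tx descriptors the hardware has finished with, i.e. how many the
 * driver may reclaim.  Derived from the cidx snapshot that the SGE writes to
 * the queue's status page, with ring wrap-around handled below.
 */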
171 static inline int
172 reclaimable(struct sge_eq *eq)
173 {
174 unsigned int cidx;
175
176 cidx = eq->spg->cidx; /* stable snapshot */
177 cidx = be16_to_cpu(cidx);
178
179 if (cidx >= eq->cidx)
180 return (cidx - eq->cidx);
181 else
182 return (cidx + eq->cap - eq->cidx);
183 }
184
185 void
186 t4_sge_init(struct adapter *sc)
187 {
188 struct driver_properties *p = &sc->props;
189 ddi_dma_attr_t *dma_attr;
190 ddi_device_acc_attr_t *acc_attr;
191 uint32_t sge_control, sge_conm_ctrl;
192 int egress_threshold;
193
194 /*
195 * Device access and DMA attributes for descriptor rings
196 */
197 acc_attr = &sc->sge.acc_attr_desc;
198 acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
199 acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
200 acc_attr->devacc_attr_dataorder = DDI_STRICTORDER_ACC;
201
202 dma_attr = &sc->sge.dma_attr_desc;
203 dma_attr->dma_attr_version = DMA_ATTR_V0;
204 dma_attr->dma_attr_addr_lo = 0;
205 dma_attr->dma_attr_addr_hi = UINT64_MAX;
206 dma_attr->dma_attr_count_max = UINT64_MAX;
207 dma_attr->dma_attr_align = 512;
208 dma_attr->dma_attr_burstsizes = 0xfff;
209 dma_attr->dma_attr_minxfer = 1;
210 dma_attr->dma_attr_maxxfer = UINT64_MAX;
211 dma_attr->dma_attr_seg = UINT64_MAX;
212 dma_attr->dma_attr_sgllen = 1;
213 dma_attr->dma_attr_granular = 1;
214 dma_attr->dma_attr_flags = 0;
215
216 /*
217 * Device access and DMA attributes for tx buffers
218 */
219 acc_attr = &sc->sge.acc_attr_tx;
220 acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
221 acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
222
223 dma_attr = &sc->sge.dma_attr_tx;
224 dma_attr->dma_attr_version = DMA_ATTR_V0;
225 dma_attr->dma_attr_addr_lo = 0;
226 dma_attr->dma_attr_addr_hi = UINT64_MAX;
227 dma_attr->dma_attr_count_max = UINT64_MAX;
228 dma_attr->dma_attr_align = 1;
229 dma_attr->dma_attr_burstsizes = 0xfff;
230 dma_attr->dma_attr_minxfer = 1;
231 dma_attr->dma_attr_maxxfer = UINT64_MAX;
232 dma_attr->dma_attr_seg = UINT64_MAX;
233 dma_attr->dma_attr_sgllen = TX_SGL_SEGS;
234 dma_attr->dma_attr_granular = 1;
235 dma_attr->dma_attr_flags = 0;
236
237 /*
238 * Ingress Padding Boundary and Egress Status Page Size are set up by
239 * t4_fixup_host_params().
240 */
241 sge_control = t4_read_reg(sc, A_SGE_CONTROL);
242 sc->sge.pktshift = G_PKTSHIFT(sge_control);
243 sc->sge.stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
244
245 /* t4_nex uses FLM packed mode */
246 sc->sge.fl_align = t4_fl_pkt_align(sc, true);
247
248 /*
249 * Device access and DMA attributes for rx buffers
250 */
251 sc->sge.rxb_params.dip = sc->dip;
252 sc->sge.rxb_params.buf_size = rx_buf_size;
253
254 acc_attr = &sc->sge.rxb_params.acc_attr_rx;
255 acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
256 acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
257
258 dma_attr = &sc->sge.rxb_params.dma_attr_rx;
259 dma_attr->dma_attr_version = DMA_ATTR_V0;
260 dma_attr->dma_attr_addr_lo = 0;
261 dma_attr->dma_attr_addr_hi = UINT64_MAX;
262 dma_attr->dma_attr_count_max = UINT64_MAX;
263 /*
264 * Low 4 bits of an rx buffer address have a special meaning to the SGE
265 * and an rx buf cannot have an address with any of these bits set.
266 * FL_ALIGN is >= 32 so we're sure things are ok.
267 */
268 dma_attr->dma_attr_align = sc->sge.fl_align;
269 dma_attr->dma_attr_burstsizes = 0xfff;
270 dma_attr->dma_attr_minxfer = 1;
271 dma_attr->dma_attr_maxxfer = UINT64_MAX;
272 dma_attr->dma_attr_seg = UINT64_MAX;
273 dma_attr->dma_attr_sgllen = 1;
274 dma_attr->dma_attr_granular = 1;
275 dma_attr->dma_attr_flags = 0;
276
277 sc->sge.rxbuf_cache = rxbuf_cache_create(&sc->sge.rxb_params);
278
279 /*
280 * A FL with <= fl_starve_thres buffers is starving and a periodic
281 * timer will attempt to refill it. This needs to be larger than the
282 * SGE's Egress Congestion Threshold. If it isn't, then we can get
283 * stuck waiting for new packets while the SGE is waiting for us to
284 * give it more Free List entries. (Note that the SGE's Egress
285 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
286 * there was only a single field to control this. For T5 there's the
287 * original field which now only applies to Unpacked Mode Free List
288 * buffers and a new field which only applies to Packed Mode Free List
289 * buffers.
290 */
291
292 sge_conm_ctrl = t4_read_reg(sc, A_SGE_CONM_CTRL);
293 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
294 case CHELSIO_T4:
295 egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
296 break;
297 case CHELSIO_T5:
298 egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
299 break;
300 case CHELSIO_T6:
301 default:
302 egress_threshold = G_T6_EGRTHRESHOLDPACKING(sge_conm_ctrl);
303 }
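/*
 * egress_threshold is in units of 2 free list pointers (see the comment
 * above), so 2*egress_threshold + 1 buffers is strictly greater than the
 * hardware's congestion threshold.
 */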
304 sc->sge.fl_starve_threshold = 2*egress_threshold + 1;
305
306 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, rx_buf_size);
307
308 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
309 V_THRESHOLD_0(p->counter_val[0]) |
310 V_THRESHOLD_1(p->counter_val[1]) |
311 V_THRESHOLD_2(p->counter_val[2]) |
312 V_THRESHOLD_3(p->counter_val[3]));
313
314 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
315 V_TIMERVALUE0(us_to_core_ticks(sc, p->timer_val[0])) |
316 V_TIMERVALUE1(us_to_core_ticks(sc, p->timer_val[1])));
317 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
318 V_TIMERVALUE2(us_to_core_ticks(sc, p->timer_val[2])) |
319 V_TIMERVALUE3(us_to_core_ticks(sc, p->timer_val[3])));
320 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
321 V_TIMERVALUE4(us_to_core_ticks(sc, p->timer_val[4])) |
322 V_TIMERVALUE5(us_to_core_ticks(sc, p->timer_val[5])));
323
324 (void) t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_rpl);
325 (void) t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_rpl);
326 (void) t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
327 (void) t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
328 (void) t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL,
329 t4_handle_fw_rpl);
330 }
331
332 /*
333 * Allocate and initialize the firmware event queue and the forwarded interrupt
334 * queues, if any. The adapter owns all these queues as they are not associated
335 * with any particular port.
336 *
337 * Returns errno on failure. Resources allocated up to that point may still be
338 * allocated. Caller is responsible for cleanup in case this function fails.
339 */
340 int
341 t4_setup_adapter_queues(struct adapter *sc)
342 {
343 int rc;
344
345 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
346
347 /*
348 * Firmware event queue
349 */
350 rc = alloc_fwq(sc);
351 if (rc != 0)
352 return (rc);
353
354 #ifdef TCP_OFFLOAD_ENABLE
355 /*
356 * Management queue. This is just a control queue that uses the fwq as
357 * its associated iq.
358 */
359 rc = alloc_mgmtq(sc);
360 #endif
361
362 return (rc);
363 }
364
365 /*
366 * Idempotent
367 */
368 int
369 t4_teardown_adapter_queues(struct adapter *sc)
370 {
371
372 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
373
374 (void) free_fwq(sc);
375
376 return (0);
377 }
378
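/*
 * Index of the first interrupt vector to be used by this port's queues when
 * multiple vectors are in use.  Vectors before it are taken by T4_EXTRA_INTR
 * (vectors reserved for things other than port rx queues) and by the rx
 * queues of lower-numbered ports.
 */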
379 static inline int
380 first_vector(struct port_info *pi)
381 {
382 struct adapter *sc = pi->adapter;
383 int rc = T4_EXTRA_INTR, i;
384
385 if (sc->intr_count == 1)
386 return (0);
387
388 for_each_port(sc, i) {
389 struct port_info *p = sc->port[i];
390
391 if (i == pi->port_id)
392 break;
393
394 #ifdef TCP_OFFLOAD_ENABLE
395 if (!(sc->flags & INTR_FWD))
396 rc += p->nrxq + p->nofldrxq;
397 else
398 rc += max(p->nrxq, p->nofldrxq);
399 #else
400 /*
401 * Not compiled with offload support and intr_count > 1. Only
402 * NIC queues exist and they'd better be taking direct
403 * interrupts.
404 */
405 ASSERT(!(sc->flags & INTR_FWD));
406 rc += p->nrxq;
407 #endif
408 }
409 return (rc);
410 }
411
412 /*
413 * Given an arbitrary "index," come up with an iq that can be used by other
414 * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
415 * The iq returned is guaranteed to be something that takes direct interrupts.
416 */
417 static struct sge_iq *
418 port_intr_iq(struct port_info *pi, int idx)
419 {
420 struct adapter *sc = pi->adapter;
421 struct sge *s = &sc->sge;
422 struct sge_iq *iq = NULL;
423
424 if (sc->intr_count == 1)
425 return (&sc->sge.fwq);
426
427 #ifdef TCP_OFFLOAD_ENABLE
428 if (!(sc->flags & INTR_FWD)) {
429 idx %= pi->nrxq + pi->nofldrxq;
430
431 if (idx >= pi->nrxq) {
432 idx -= pi->nrxq;
433 iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
434 } else
435 iq = &s->rxq[pi->first_rxq + idx].iq;
436
437 } else {
438 idx %= max(pi->nrxq, pi->nofldrxq);
439
440 if (pi->nrxq >= pi->nofldrxq)
441 iq = &s->rxq[pi->first_rxq + idx].iq;
442 else
443 iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
444 }
445 #else
446 /*
447 * Not compiled with offload support and intr_count > 1. Only NIC
448 * queues exist and they'd better be taking direct interrupts.
449 */
450 ASSERT(!(sc->flags & INTR_FWD));
451
452 idx %= pi->nrxq;
453 iq = &s->rxq[pi->first_rxq + idx].iq;
454 #endif
455
456 return (iq);
457 }
458
459 int
460 t4_setup_port_queues(struct port_info *pi)
461 {
462 int rc = 0, i, intr_idx, j;
463 struct sge_rxq *rxq;
464 struct sge_txq *txq;
465 #ifdef TCP_OFFLOAD_ENABLE
466 int iqid;
467 struct sge_wrq *ctrlq;
468 struct sge_ofld_rxq *ofld_rxq;
469 struct sge_wrq *ofld_txq;
470 #endif
471 struct adapter *sc = pi->adapter;
472 struct driver_properties *p = &sc->props;
473
474 pi->ksp_config = setup_port_config_kstats(pi);
475 pi->ksp_info = setup_port_info_kstats(pi);
476
477 /* Interrupt vector to start from (when using multiple vectors) */
478 intr_idx = first_vector(pi);
479
480 /*
481 * First pass over all rx queues (NIC and TOE):
482 * a) initialize iq and fl
483 * b) allocate queue iff it will take direct interrupts.
484 */
485
486 for_each_rxq(pi, i, rxq) {
487
488 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, p->qsize_rxq,
489 RX_IQ_ESIZE);
490
491 init_fl(&rxq->fl, p->qsize_rxq / 8); /* 8 bufs in each entry */
492
493 if ((!(sc->flags & INTR_FWD))
494 #ifdef TCP_OFFLOAD_ENABLE
495 || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
496 #else
497 || (sc->intr_count > 1 && pi->nrxq)
498 #endif
499 ) {
500 rxq->iq.flags |= IQ_INTR;
501 rc = alloc_rxq(pi, rxq, intr_idx, i);
502 if (rc != 0)
503 goto done;
504 intr_idx++;
505 }
506
507 }
508
509 #ifdef TCP_OFFLOAD_ENABLE
510 for_each_ofld_rxq(pi, i, ofld_rxq) {
511
512 init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
513 p->qsize_rxq, RX_IQ_ESIZE);
514
515 init_fl(&ofld_rxq->fl, p->qsize_rxq / 8);
516
517 if (!(sc->flags & INTR_FWD) ||
518 (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
519 ofld_rxq->iq.flags = IQ_INTR;
520 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx);
521 if (rc != 0)
522 goto done;
523
524 intr_idx++;
525 }
526 }
527 #endif
528
529 /*
530 * Second pass over all rx queues (NIC and TOE). The queues forwarding
531 * their interrupts are allocated now.
532 */
533 j = 0;
534 for_each_rxq(pi, i, rxq) {
535 if (rxq->iq.flags & IQ_INTR)
536 continue;
537
538 intr_idx = port_intr_iq(pi, j)->abs_id;
539
540 rc = alloc_rxq(pi, rxq, intr_idx, i);
541 if (rc != 0)
542 goto done;
543 j++;
544 }
545
546 #ifdef TCP_OFFLOAD_ENABLE
547 for_each_ofld_rxq(pi, i, ofld_rxq) {
548 if (ofld_rxq->iq.flags & IQ_INTR)
549 continue;
550
551 intr_idx = port_intr_iq(pi, j)->abs_id;
552 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx);
553 if (rc != 0)
554 goto done;
555 j++;
556 }
557 #endif
558 /*
559 * Now the tx queues. Only one pass needed.
560 */
561 j = 0;
562 for_each_txq(pi, i, txq) {
563 uint16_t iqid;
564
565 iqid = port_intr_iq(pi, j)->cntxt_id;
566 init_eq(sc, &txq->eq, EQ_ETH, p->qsize_txq, pi->tx_chan, iqid);
567 rc = alloc_txq(pi, txq, i);
568 if (rc != 0)
569 goto done;
570 }
571
572 #ifdef TCP_OFFLOAD_ENABLE
573 for_each_ofld_txq(pi, i, ofld_txq) {
574 uint16_t iqid;
575
576 iqid = port_intr_iq(pi, j)->cntxt_id;
577 init_eq(sc, &ofld_txq->eq, EQ_OFLD, p->qsize_txq, pi->tx_chan,
578 iqid);
579 rc = alloc_wrq(sc, pi, ofld_txq, i);
580 if (rc != 0)
581 goto done;
582 }
583
584 /*
585 * Finally, the control queue.
586 */
587 ctrlq = &sc->sge.ctrlq[pi->port_id];
588 iqid = port_intr_iq(pi, 0)->cntxt_id;
589 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid);
590 rc = alloc_wrq(sc, pi, ctrlq, 0);
591 #endif
592
593 done:
594 if (rc != 0)
595 (void) t4_teardown_port_queues(pi);
596
597 return (rc);
598 }
599
600 /*
601 * Idempotent
602 */
603 int
604 t4_teardown_port_queues(struct port_info *pi)
605 {
606 int i;
607 struct sge_rxq *rxq;
608 struct sge_txq *txq;
609 #ifdef TCP_OFFLOAD_ENABLE
610 struct adapter *sc = pi->adapter;
611 struct sge_ofld_rxq *ofld_rxq;
612 struct sge_wrq *ofld_txq;
613 #endif
614
615 if (pi->ksp_config != NULL) {
616 kstat_delete(pi->ksp_config);
617 pi->ksp_config = NULL;
618 }
619 if (pi->ksp_info != NULL) {
620 kstat_delete(pi->ksp_info);
621 pi->ksp_info = NULL;
622 }
623
624 #ifdef TCP_OFFLOAD_ENABLE
625 (void) free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
626 #endif
627
628 for_each_txq(pi, i, txq) {
629 (void) free_txq(pi, txq);
630 }
631
632 #ifdef TCP_OFFLOAD_ENABLE
633 for_each_ofld_txq(pi, i, ofld_txq) {
634 (void) free_wrq(sc, ofld_txq);
635 }
636
637 for_each_ofld_rxq(pi, i, ofld_rxq) {
638 if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
639 (void) free_ofld_rxq(pi, ofld_rxq);
640 }
641 #endif
642
643 for_each_rxq(pi, i, rxq) {
644 if ((rxq->iq.flags & IQ_INTR) == 0)
645 (void) free_rxq(pi, rxq);
646 }
647
648 /*
649 * Then take down the rx queues that take direct interrupts.
650 */
651
652 for_each_rxq(pi, i, rxq) {
653 if (rxq->iq.flags & IQ_INTR)
654 (void) free_rxq(pi, rxq);
655 }
656
657 #ifdef TCP_OFFLOAD_ENABLE
658 for_each_ofld_rxq(pi, i, ofld_rxq) {
659 if (ofld_rxq->iq.flags & IQ_INTR)
660 (void) free_ofld_rxq(pi, ofld_rxq);
661 }
662 #endif
663
664 return (0);
665 }
666
667 /* Deals with errors and forwarded interrupts */
668 uint_t
669 t4_intr_all(caddr_t arg1, caddr_t arg2)
670 {
671
672 (void) t4_intr_err(arg1, arg2);
673 (void) t4_intr(arg1, arg2);
674
675 return (DDI_INTR_CLAIMED);
676 }
677
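/*
 * Rx work for a ring that supports polling: unless MAC has switched the ring
 * to polling mode, drain up to an eighth of the iq and re-arm the interrupt,
 * then hand the resulting mblk chain to mac_rx_ring() after dropping the lock.
 */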
678 static void
679 t4_intr_rx_work(struct sge_iq *iq)
680 {
681 mblk_t *mp = NULL;
682 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */
683 RXQ_LOCK(rxq);
684 if (!iq->polling) {
685 mp = t4_ring_rx(rxq, iq->qsize/8);
686 t4_write_reg(iq->adapter, MYPF_REG(A_SGE_PF_GTS),
687 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
688 }
689 RXQ_UNLOCK(rxq);
690 if (mp != NULL)
691 mac_rx_ring(rxq->port->mh, rxq->ring_handle, mp,
692 rxq->ring_gen_num);
693 }
694
695 /* Deals with interrupts on the given ingress queue */
696 /* ARGSUSED */
697 uint_t
698 t4_intr(caddr_t arg1, caddr_t arg2)
699 {
700 struct sge_iq *iq = (struct sge_iq *)arg2;
701 int state;
702
703 /* Right now receive polling is only enabled for MSI-X, and only when
704 * we have enough MSI-X vectors, i.e. no interrupt forwarding.
705 */
706 if (iq->adapter->props.multi_rings) {
707 t4_intr_rx_work(iq);
708 } else {
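/*
 * Claim the iq (IDLE -> BUSY) so that only one context services it at
 * a time; release the claim once service_iq() returns.
 */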
709 state = atomic_cas_uint(&iq->state, IQS_IDLE, IQS_BUSY);
710 if (state == IQS_IDLE) {
711 (void) service_iq(iq, 0);
712 (void) atomic_cas_uint(&iq->state, IQS_BUSY, IQS_IDLE);
713 }
714 }
715 return (DDI_INTR_CLAIMED);
716 }
717
718 /* Deals with error interrupts */
719 /* ARGSUSED */
720 uint_t
721 t4_intr_err(caddr_t arg1, caddr_t arg2)
722 {
723 /* LINTED: E_BAD_PTR_CAST_ALIGN */
724 struct adapter *sc = (struct adapter *)arg1;
725
726 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
727 (void) t4_slow_intr_handler(sc);
728
729 return (DDI_INTR_CLAIMED);
730 }
731
732 /*
733 * t4_ring_rx - Process responses from an SGE response queue.
734 *
735 * Processes responses from an SGE response queue, up to the supplied
736 * budget.  Responses include received packets as well as control messages
737 * from the firmware or hardware.
738 * Returns a chain of mblks containing the received data, to be passed up
739 * to mac_ring_rx().
740 */
741 mblk_t *
742 t4_ring_rx(struct sge_rxq *rxq, int budget)
743 {
744 struct sge_iq *iq = &rxq->iq;
745 struct sge_fl *fl = &rxq->fl; /* Use iff IQ_HAS_FL */
746 struct adapter *sc = iq->adapter;
747 struct rsp_ctrl *ctrl;
748 const struct rss_header *rss;
749 int ndescs = 0, fl_bufs_used = 0;
750 int rsp_type;
751 uint32_t lq;
752 mblk_t *mblk_head = NULL, **mblk_tail, *m;
753 struct cpl_rx_pkt *cpl;
754 uint32_t received_bytes = 0, pkt_len = 0;
755 bool csum_ok;
756 uint16_t err_vec;
757
758 mblk_tail = &mblk_head;
759
760 while (is_new_response(iq, &ctrl)) {
761
762 membar_consumer();
763
764 m = NULL;
765 rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
766 lq = be32_to_cpu(ctrl->pldbuflen_qid);
767 rss = (const void *)iq->cdesc;
768
769 switch (rsp_type) {
770 case X_RSPD_TYPE_FLBUF:
771
772 ASSERT(iq->flags & IQ_HAS_FL);
773
774 if (CPL_RX_PKT == rss->opcode) {
775 cpl = (void *)(rss + 1);
776 pkt_len = be16_to_cpu(cpl->len);
777
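/*
 * In polling mode the budget is in bytes; stop before exceeding it
 * (in interrupt mode it is a descriptor count, checked at the bottom
 * of the loop).
 */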
778 if (iq->polling && ((received_bytes + pkt_len) > budget))
779 goto done;
780
781 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
782 if (m == NULL)
783 goto done;
784
785 iq->intr_next = iq->intr_params;
786 m->b_rptr += sc->sge.pktshift;
787 if (sc->params.tp.rx_pkt_encap)
788 /* It is enabled only in T6 config file */
789 err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
790 else
791 err_vec = ntohs(cpl->err_vec);
792
793 csum_ok = cpl->csum_calc && !err_vec;
794
795 /* TODO: what about cpl->ip_frag? */
796 if (csum_ok && !cpl->ip_frag) {
797 mac_hcksum_set(m, 0, 0, 0, 0xffff,
798 HCK_FULLCKSUM_OK | HCK_FULLCKSUM |
799 HCK_IPV4_HDRCKSUM_OK);
800 rxq->rxcsum++;
801 }
802 rxq->rxpkts++;
803 rxq->rxbytes += pkt_len;
804 received_bytes += pkt_len;
805
806 *mblk_tail = m;
807 mblk_tail = &m->b_next;
808
809 break;
810 }
811
812 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
813 if (m == NULL)
814 goto done;
815 /* FALLTHROUGH */
816
817 case X_RSPD_TYPE_CPL:
818 ASSERT(rss->opcode < NUM_CPL_CMDS);
819 sc->cpl_handler[rss->opcode](iq, rss, m);
820 break;
821
822 default:
823 break;
824 }
825 iq_next(iq);
826 ++ndescs;
827 if (!iq->polling && (ndescs == budget))
828 break;
829 }
830
831 done:
832
833 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
834 V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) |
835 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
836
837 if ((fl_bufs_used > 0) || (iq->flags & IQ_HAS_FL)) {
838 int starved;
839 FL_LOCK(fl);
840 fl->needed += fl_bufs_used;
841 starved = refill_fl(sc, fl, fl->cap / 8);
842 FL_UNLOCK(fl);
843 if (starved)
844 add_fl_to_sfl(sc, fl);
845 }
846 return (mblk_head);
847 }
848
849 /*
850 * Deals with anything and everything on the given ingress queue.
851 */
852 static int
853 service_iq(struct sge_iq *iq, int budget)
854 {
855 struct sge_iq *q;
856 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */
857 struct sge_fl *fl = &rxq->fl; /* Use iff IQ_HAS_FL */
858 struct adapter *sc = iq->adapter;
859 struct rsp_ctrl *ctrl;
860 const struct rss_header *rss;
861 int ndescs = 0, limit, fl_bufs_used = 0;
862 int rsp_type;
863 uint32_t lq;
864 int starved;
865 mblk_t *m;
866 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
867
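/* A budget of 0 means "no budget": keep going until the iq is drained. */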
868 limit = budget ? budget : iq->qsize / 8;
869
870 /*
871 * We always come back and check the descriptor ring for new indirect
872 * interrupts and other responses after running a single handler.
873 */
874 for (;;) {
875 while (is_new_response(iq, &ctrl)) {
876
877 membar_consumer();
878
879 m = NULL;
880 rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
881 lq = be32_to_cpu(ctrl->pldbuflen_qid);
882 rss = (const void *)iq->cdesc;
883
884 switch (rsp_type) {
885 case X_RSPD_TYPE_FLBUF:
886
887 ASSERT(iq->flags & IQ_HAS_FL);
888
889 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
890 if (m == NULL) {
891 /*
892 * Rearm the iq with a
893 * longer-than-default timer
894 */
895 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
896 V_INGRESSQID((u32)iq->cntxt_id) |
897 V_SEINTARM(V_QINTR_TIMER_IDX(SGE_NTIMERS-1)));
898 if (fl_bufs_used > 0) {
899 ASSERT(iq->flags & IQ_HAS_FL);
900 FL_LOCK(fl);
901 fl->needed += fl_bufs_used;
902 starved = refill_fl(sc, fl, fl->cap / 8);
903 FL_UNLOCK(fl);
904 if (starved)
905 add_fl_to_sfl(sc, fl);
906 }
907 return (0);
908 }
909
910 /* FALLTHRU */
911 case X_RSPD_TYPE_CPL:
912
913 ASSERT(rss->opcode < NUM_CPL_CMDS);
914 sc->cpl_handler[rss->opcode](iq, rss, m);
915 break;
916
917 case X_RSPD_TYPE_INTR:
918
919 /*
920 * Interrupts should be forwarded only to queues
921 * that are not forwarding their interrupts.
922 * This means service_iq can recurse but only 1
923 * level deep.
924 */
925 ASSERT(budget == 0);
926
927 q = sc->sge.iqmap[lq - sc->sge.iq_start];
928 if (atomic_cas_uint(&q->state, IQS_IDLE,
929 IQS_BUSY) == IQS_IDLE) {
930 if (service_iq(q, q->qsize / 8) == 0) {
931 (void) atomic_cas_uint(
932 &q->state, IQS_BUSY,
933 IQS_IDLE);
934 } else {
935 STAILQ_INSERT_TAIL(&iql, q,
936 link);
937 }
938 }
939 break;
940
941 default:
942 break;
943 }
944
945 iq_next(iq);
946 if (++ndescs == limit) {
947 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
948 V_CIDXINC(ndescs) |
949 V_INGRESSQID(iq->cntxt_id) |
950 V_SEINTARM(V_QINTR_TIMER_IDX(
951 X_TIMERREG_UPDATE_CIDX)));
952 ndescs = 0;
953
954 if (fl_bufs_used > 0) {
955 ASSERT(iq->flags & IQ_HAS_FL);
956 FL_LOCK(fl);
957 fl->needed += fl_bufs_used;
958 (void) refill_fl(sc, fl, fl->cap / 8);
959 FL_UNLOCK(fl);
960 fl_bufs_used = 0;
961 }
962
963 if (budget != 0)
964 return (EINPROGRESS);
965 }
966 }
967
968 if (STAILQ_EMPTY(&iql) != 0)
969 break;
970
971 /*
972 * Process the head only, and send it to the back of the list if
973 * it's still not done.
974 */
975 q = STAILQ_FIRST(&iql);
976 STAILQ_REMOVE_HEAD(&iql, link);
977 if (service_iq(q, q->qsize / 8) == 0)
978 (void) atomic_cas_uint(&q->state, IQS_BUSY, IQS_IDLE);
979 else
980 STAILQ_INSERT_TAIL(&iql, q, link);
981 }
982
983 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
984 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
985
986 if (iq->flags & IQ_HAS_FL) {
987
988 FL_LOCK(fl);
989 fl->needed += fl_bufs_used;
990 starved = refill_fl(sc, fl, fl->cap / 4);
991 FL_UNLOCK(fl);
992 if (starved != 0)
993 add_fl_to_sfl(sc, fl);
994 }
995
996 return (0);
997 }
998
999 #ifdef TCP_OFFLOAD_ENABLE
1000 int
1001 t4_mgmt_tx(struct adapter *sc, mblk_t *m)
1002 {
1003 return (t4_wrq_tx(sc, &sc->sge.mgmtq, m));
1004 }
1005
1006 /*
1007 * Doesn't fail. Holds on to work requests it can't send right away.
1008 */
1009 int
1010 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, mblk_t *m0)
1011 {
1012 struct sge_eq *eq = &wrq->eq;
1013 struct mblk_pair *wr_list = &wrq->wr_list;
1014 int can_reclaim;
1015 caddr_t dst;
1016 mblk_t *wr, *next;
1017
1018 TXQ_LOCK_ASSERT_OWNED(wrq);
1019 #ifdef TCP_OFFLOAD_ENABLE
1020 ASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
1021 (eq->flags & EQ_TYPEMASK) == EQ_CTRL);
1022 #else
1023 ASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL);
1024 #endif
1025
1026 if (m0 != NULL) {
1027 if (wr_list->head != NULL)
1028 wr_list->tail->b_next = m0;
1029 else
1030 wr_list->head = m0;
1031 while (m0->b_next)
1032 m0 = m0->b_next;
1033 wr_list->tail = m0;
1034 }
1035
1036 can_reclaim = reclaimable(eq);
1037 eq->cidx += can_reclaim;
1038 eq->avail += can_reclaim;
1039 if (eq->cidx >= eq->cap)
1040 eq->cidx -= eq->cap;
1041
1042 for (wr = wr_list->head; wr; wr = next) {
1043 int ndesc, len = 0;
1044 mblk_t *m;
1045
1046 next = wr->b_next;
1047 wr->b_next = NULL;
1048
1049 for (m = wr; m; m = m->b_cont)
1050 len += MBLKL(m);
1051
1052 ASSERT(len > 0 && (len & 0x7) == 0);
1053 ASSERT(len <= SGE_MAX_WR_LEN);
1054
1055 ndesc = howmany(len, EQ_ESIZE);
1056 if (eq->avail < ndesc) {
1057 wr->b_next = next;
1058 wrq->no_desc++;
1059 break;
1060 }
1061
1062 dst = (void *)&eq->desc[eq->pidx];
1063 for (m = wr; m; m = m->b_cont)
1064 copy_to_txd(eq, (void *)m->b_rptr, &dst, MBLKL(m));
1065
1066 eq->pidx += ndesc;
1067 eq->avail -= ndesc;
1068 if (eq->pidx >= eq->cap)
1069 eq->pidx -= eq->cap;
1070
1071 eq->pending += ndesc;
1072 if (eq->pending > 16)
1073 ring_tx_db(sc, eq);
1074
1075 wrq->tx_wrs++;
1076 freemsg(wr);
1077
1078 if (eq->avail < 8) {
1079 can_reclaim = reclaimable(eq);
1080 eq->cidx += can_reclaim;
1081 eq->avail += can_reclaim;
1082 if (eq->cidx >= eq->cap)
1083 eq->cidx -= eq->cap;
1084 }
1085 }
1086
1087 if (eq->pending != 0)
1088 ring_tx_db(sc, eq);
1089
1090 if (wr == NULL)
1091 wr_list->head = wr_list->tail = NULL;
1092 else {
1093 wr_list->head = wr;
1094
1095 ASSERT(wr_list->tail->b_next == NULL);
1096 }
1097
1098 return (0);
1099 }
1100 #endif
1101
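/*
 * The SGE measures work requests in flits (8 bytes each); the divisions by 8
 * below convert header sizes in bytes to flits.
 */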
1102 /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
1103 #define TXPKTS_PKT_HDR ((\
1104 sizeof (struct ulp_txpkt) + \
1105 sizeof (struct ulptx_idata) + \
1106 sizeof (struct cpl_tx_pkt_core)) / 8)
1107
1108 /* Header of a coalesced tx WR, before SGL of first packet (in flits) */
1109 #define TXPKTS_WR_HDR (\
1110 sizeof (struct fw_eth_tx_pkts_wr) / 8 + \
1111 TXPKTS_PKT_HDR)
1112
1113 /* Header of a tx WR, before SGL of first packet (in flits) */
1114 #define TXPKT_WR_HDR ((\
1115 sizeof (struct fw_eth_tx_pkt_wr) + \
1116 sizeof (struct cpl_tx_pkt_core)) / 8)
1117
1118 /* Header of a tx LSO WR, before SGL of first packet (in flits) */
1119 #define TXPKT_LSO_WR_HDR ((\
1120 sizeof (struct fw_eth_tx_pkt_wr) + \
1121 sizeof (struct cpl_tx_pkt_lso_core) + \
1122 sizeof (struct cpl_tx_pkt_core)) / 8)
1123
1124 mblk_t *
1125 t4_eth_tx(void *arg, mblk_t *frame)
1126 {
1127 struct sge_txq *txq = (struct sge_txq *) arg;
1128 struct port_info *pi = txq->port;
1129 struct adapter *sc = pi->adapter;
1130 struct sge_eq *eq = &txq->eq;
1131 mblk_t *next_frame;
1132 int rc, coalescing;
1133 struct txpkts txpkts;
1134 struct txinfo txinfo;
1135
1136 txpkts.npkt = 0; /* indicates there's nothing in txpkts */
1137 coalescing = 0;
1138
1139 TXQ_LOCK(txq);
1140 if (eq->avail < 8)
1141 (void) reclaim_tx_descs(txq, 8);
1142 for (; frame; frame = next_frame) {
1143
1144 if (eq->avail < 8)
1145 break;
1146
1147 next_frame = frame->b_next;
1148 frame->b_next = NULL;
1149
1150 if (next_frame != NULL)
1151 coalescing = 1;
1152
1153 rc = get_frame_txinfo(txq, &frame, &txinfo, coalescing);
1154 if (rc != 0) {
1155 if (rc == ENOMEM) {
1156
1157 /* Short of resources, suspend tx */
1158
1159 frame->b_next = next_frame;
1160 break;
1161 }
1162
1163 /*
1164 * Unrecoverable error for this frame, throw it
1165 * away and move on to the next.
1166 */
1167
1168 freemsg(frame);
1169 continue;
1170 }
1171
1172 if (coalescing != 0 &&
1173 add_to_txpkts(txq, &txpkts, frame, &txinfo) == 0) {
1174
1175 /* Successfully absorbed into txpkts */
1176
1177 write_ulp_cpl_sgl(pi, txq, &txpkts, &txinfo);
1178 goto doorbell;
1179 }
1180
1181 /*
1182 * We weren't coalescing to begin with, or current frame could
1183 * not be coalesced (add_to_txpkts flushes txpkts if a frame
1184 * given to it can't be coalesced). Either way there should be
1185 * nothing in txpkts.
1186 */
1187 ASSERT(txpkts.npkt == 0);
1188
1189 /* We're sending out individual frames now */
1190 coalescing = 0;
1191
1192 if (eq->avail < 8)
1193 (void) reclaim_tx_descs(txq, 8);
1194 rc = write_txpkt_wr(pi, txq, frame, &txinfo);
1195 if (rc != 0) {
1196
1197 /* Short of hardware descriptors, suspend tx */
1198
1199 /*
1200 * This is an unlikely but expensive failure. We've
1201 * done all the hard work (DMA bindings etc.) and now we
1202 * can't send out the frame. What's worse, we have to
1203 * spend even more time freeing up everything in txinfo.
1204 */
1205 txq->qfull++;
1206 free_txinfo_resources(txq, &txinfo);
1207
1208 frame->b_next = next_frame;
1209 break;
1210 }
1211
1212 doorbell:
1213 /* Fewer and fewer doorbells as the queue fills up */
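/*
 * The pending threshold grows roughly as the square root of the number of
 * descriptors in use, e.g. with ~64 descriptors outstanding the doorbell is
 * rung once 8 writes are pending.
 */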
1214 if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2))) {
1215 txq->txbytes += txinfo.len;
1216 txq->txpkts++;
1217 ring_tx_db(sc, eq);
1218 }
1219 (void) reclaim_tx_descs(txq, 32);
1220 }
1221
1222 if (txpkts.npkt > 0)
1223 write_txpkts_wr(txq, &txpkts);
1224
1225 /*
1226 * frame not NULL means there was an error but we haven't thrown it
1227 * away. This can happen when we're short of tx descriptors (qfull) or
1228 * maybe even DMA handles (dma_hdl_failed). Either way, a credit flush
1229 * and reclaim will get things going again.
1230 *
1231 * If eq->avail is already 0 we know a credit flush was requested in the
1232 * WR that reduced it to 0 so we don't need another flush (we don't have
1233 * any descriptor for a flush WR anyway, duh).
1234 */
1235 if (frame && eq->avail > 0)
1236 write_txqflush_wr(txq);
1237
1238 if (eq->pending != 0)
1239 ring_tx_db(sc, eq);
1240
1241 (void) reclaim_tx_descs(txq, eq->qsize);
1242 TXQ_UNLOCK(txq);
1243
1244 return (frame);
1245 }
1246
1247 static inline void
1248 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int8_t pktc_idx,
1249 int qsize, uint8_t esize)
1250 {
1251 ASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS);
1252 ASSERT(pktc_idx < SGE_NCOUNTERS); /* -ve is ok, means don't use */
1253
1254 iq->flags = 0;
1255 iq->adapter = sc;
1256 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
1257 iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
1258 if (pktc_idx >= 0) {
1259 iq->intr_params |= F_QINTR_CNT_EN;
1260 iq->intr_pktc_idx = pktc_idx;
1261 }
1262 iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */
1263 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */
1264 }
1265
1266 static inline void
1267 init_fl(struct sge_fl *fl, uint16_t qsize)
1268 {
1269
1270 fl->qsize = qsize;
1271 fl->allocb_fail = 0;
1272 }
1273
1274 static inline void
1275 init_eq(struct adapter *sc, struct sge_eq *eq, uint16_t eqtype, uint16_t qsize,
1276 uint8_t tx_chan, uint16_t iqid)
1277 {
1278 struct sge *s = &sc->sge;
1279 uint32_t r;
1280
1281 ASSERT(tx_chan < NCHAN);
1282 ASSERT(eqtype <= EQ_TYPEMASK);
1283
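/*
 * On T5 cache the egress queues-per-page setting for this PF; alloc_eq()
 * uses it (sc->sge.s_qpp) to compute the queue's user doorbell offset.
 */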
1284 if (is_t5(sc->params.chip)) {
1285 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
1286 r >>= S_QUEUESPERPAGEPF0 +
1287 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
1288 s->s_qpp = r & M_QUEUESPERPAGEPF0;
1289 }
1290
1291 eq->flags = eqtype & EQ_TYPEMASK;
1292 eq->tx_chan = tx_chan;
1293 eq->iqid = iqid;
1294 eq->qsize = qsize;
1295 }
1296
1297 /*
1298 * Allocates the ring for an ingress queue and an optional freelist. If the
1299 * freelist is specified it will be allocated and then associated with the
1300 * ingress queue.
1301 *
1302 * Returns errno on failure. Resources allocated up to that point may still be
1303 * allocated. Caller is responsible for cleanup in case this function fails.
1304 *
1305 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
1306 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies
1307 * the index of the queue to which its interrupts will be forwarded.
1308 */
1309 static int
1310 alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
1311 int intr_idx, int cong)
1312 {
1313 int rc, i, cntxt_id;
1314 size_t len;
1315 struct fw_iq_cmd c;
1316 struct adapter *sc = iq->adapter;
1317 uint32_t v = 0;
1318
1319 len = iq->qsize * iq->esize;
1320 rc = alloc_desc_ring(sc, len, DDI_DMA_READ, &iq->dhdl, &iq->ahdl,
1321 &iq->ba, (caddr_t *)&iq->desc);
1322 if (rc != 0)
1323 return (rc);
1324
1325 bzero(&c, sizeof (c));
1326 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1327 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
1328 V_FW_IQ_CMD_VFN(0));
1329
1330 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1331 FW_LEN16(c));
1332
1333 /* Special handling for firmware event queue */
1334 if (iq == &sc->sge.fwq)
1335 v |= F_FW_IQ_CMD_IQASYNCH;
1336
1337 if (iq->flags & IQ_INTR)
1338 ASSERT(intr_idx < sc->intr_count);
1339 else
1340 v |= F_FW_IQ_CMD_IQANDST;
1341 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
1342
1343 c.type_to_iqandstindex = cpu_to_be32(v |
1344 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1345 V_FW_IQ_CMD_VIID(pi->viid) |
1346 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
1347 c.iqdroprss_to_iqesize = cpu_to_be16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
1348 F_FW_IQ_CMD_IQGTSMODE |
1349 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
1350 V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
1351 c.iqsize = cpu_to_be16(iq->qsize);
1352 c.iqaddr = cpu_to_be64(iq->ba);
1353 if (cong >= 0)
1354 c.iqns_to_fl0congen = BE_32(F_FW_IQ_CMD_IQFLINTCONGEN |
1355 V_FW_IQ_CMD_IQTYPE(cong ?
1356 FW_IQ_IQTYPE_NIC : FW_IQ_IQTYPE_OFLD));
1357
1358 if (fl != NULL) {
1359 unsigned int chip_ver = CHELSIO_CHIP_VERSION(sc->params.chip);
1360
1361 mutex_init(&fl->lock, NULL, MUTEX_DRIVER,
1362 DDI_INTR_PRI(sc->intr_pri));
1363 fl->flags |= FL_MTX;
1364
1365 len = fl->qsize * RX_FL_ESIZE;
1366 rc = alloc_desc_ring(sc, len, DDI_DMA_WRITE, &fl->dhdl,
1367 &fl->ahdl, &fl->ba, (caddr_t *)&fl->desc);
1368 if (rc != 0)
1369 return (rc);
1370
1371 /* Allocate space for one software descriptor per buffer. */
1372 fl->cap = (fl->qsize - sc->sge.stat_len / RX_FL_ESIZE) * 8;
1373 fl->sdesc = kmem_zalloc(sizeof (struct fl_sdesc) * fl->cap,
1374 KM_SLEEP);
1375 fl->needed = fl->cap;
1376 fl->lowat = roundup(sc->sge.fl_starve_threshold, 8);
1377
1378 c.iqns_to_fl0congen |=
1379 cpu_to_be32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1380 F_FW_IQ_CMD_FL0PACKEN | F_FW_IQ_CMD_FL0PADEN);
1381 if (cong >= 0) {
1382 c.iqns_to_fl0congen |=
1383 BE_32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1384 F_FW_IQ_CMD_FL0CONGCIF |
1385 F_FW_IQ_CMD_FL0CONGEN);
1386 }
1387
1388 /* In T6, for egress queue type FL there is an internal overhead
1389 * of 16B for the header going into the FLM module. Hence the maximum
1390 * allowed burst size is 448 bytes. For T4/T5, the hardware
1391 * doesn't coalesce fetch requests if more than 64 bytes of
1392 * Free List pointers are provided, so we use a 128-byte Fetch
1393 * Burst Minimum there (T6 implements coalescing so we can use
1394 * the smaller 64-byte value there).
1395 */
1396
1397 c.fl0dcaen_to_fl0cidxfthresh =
1398 cpu_to_be16(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5
1399 ? X_FETCHBURSTMIN_128B
1400 : X_FETCHBURSTMIN_64B) |
1401 V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5
1402 ? X_FETCHBURSTMAX_512B
1403 : X_FETCHBURSTMAX_256B));
1404 c.fl0size = cpu_to_be16(fl->qsize);
1405 c.fl0addr = cpu_to_be64(fl->ba);
1406 }
1407
1408 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
1409 if (rc != 0) {
1410 cxgb_printf(sc->dip, CE_WARN,
1411 "failed to create ingress queue: %d", rc);
1412 return (rc);
1413 }
1414
1415 iq->cdesc = iq->desc;
1416 iq->cidx = 0;
1417 iq->gen = 1;
1418 iq->intr_next = iq->intr_params;
1419 iq->adapter = sc;
1420 iq->cntxt_id = be16_to_cpu(c.iqid);
1421 iq->abs_id = be16_to_cpu(c.physiqid);
1422 iq->flags |= IQ_ALLOCATED;
1423 mutex_init(&iq->lock, NULL,
1424 MUTEX_DRIVER, DDI_INTR_PRI(DDI_INTR_PRI(sc->intr_pri)));
1425 iq->polling = 0;
1426
1427 cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1428 if (cntxt_id >= sc->sge.niq) {
1429 panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1430 cntxt_id, sc->sge.niq - 1);
1431 }
1432 sc->sge.iqmap[cntxt_id] = iq;
1433
1434 if (fl != NULL) {
1435 fl->cntxt_id = be16_to_cpu(c.fl0id);
1436 fl->pidx = fl->cidx = 0;
1437 fl->copy_threshold = rx_copy_threshold;
1438
1439 cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1440 if (cntxt_id >= sc->sge.neq) {
1441 panic("%s: fl->cntxt_id (%d) more than the max (%d)",
1442 __func__, cntxt_id, sc->sge.neq - 1);
1443 }
1444 sc->sge.eqmap[cntxt_id] = (void *)fl;
1445
1446 FL_LOCK(fl);
1447 (void) refill_fl(sc, fl, fl->lowat);
1448 FL_UNLOCK(fl);
1449
1450 iq->flags |= IQ_HAS_FL;
1451 }
1452
1453 if (is_t5(sc->params.chip) && cong >= 0) {
1454 uint32_t param, val;
1455
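/*
 * Program the congestion manager context for this iq: val selects the
 * congestion behaviour (1 << 19 vs 2 << 19) and, in the latter case,
 * marks the congested channels one bit per channel at 4-bit strides.
 */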
1456 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1457 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1458 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
1459 if (cong == 0)
1460 val = 1 << 19;
1461 else {
1462 val = 2 << 19;
1463 for (i = 0; i < 4; i++) {
1464 if (cong & (1 << i))
1465 val |= 1 << (i << 2);
1466 }
1467 }
1468
1469 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val);
1470 if (rc != 0) {
1471 /* report error but carry on */
1472 cxgb_printf(sc->dip, CE_WARN,
1473 "failed to set congestion manager context for "
1474 "ingress queue %d: %d", iq->cntxt_id, rc);
1475 }
1476 }
1477
1478 /* Enable IQ interrupts */
1479 iq->state = IQS_IDLE;
1480 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
1481 V_INGRESSQID(iq->cntxt_id));
1482
1483 return (0);
1484 }
1485
1486 static int
1487 free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
1488 {
1489 int rc;
1490
1491 if (iq != NULL) {
1492 struct adapter *sc = iq->adapter;
1493 dev_info_t *dip;
1494
1495 dip = pi ? pi->dip : sc->dip;
1496 if (iq->flags & IQ_ALLOCATED) {
1497 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
1498 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
1499 fl ? fl->cntxt_id : 0xffff, 0xffff);
1500 if (rc != 0) {
1501 cxgb_printf(dip, CE_WARN,
1502 "failed to free queue %p: %d", iq, rc);
1503 return (rc);
1504 }
1505 mutex_destroy(&iq->lock);
1506 iq->flags &= ~IQ_ALLOCATED;
1507 }
1508
1509 if (iq->desc != NULL) {
1510 (void) free_desc_ring(&iq->dhdl, &iq->ahdl);
1511 iq->desc = NULL;
1512 }
1513
1514 bzero(iq, sizeof (*iq));
1515 }
1516
1517 if (fl != NULL) {
1518 if (fl->sdesc != NULL) {
1519 FL_LOCK(fl);
1520 free_fl_bufs(fl);
1521 FL_UNLOCK(fl);
1522
1523 kmem_free(fl->sdesc, sizeof (struct fl_sdesc) *
1524 fl->cap);
1525 fl->sdesc = NULL;
1526 }
1527
1528 if (fl->desc != NULL) {
1529 (void) free_desc_ring(&fl->dhdl, &fl->ahdl);
1530 fl->desc = NULL;
1531 }
1532
1533 if (fl->flags & FL_MTX) {
1534 mutex_destroy(&fl->lock);
1535 fl->flags &= ~FL_MTX;
1536 }
1537
1538 bzero(fl, sizeof (struct sge_fl));
1539 }
1540
1541 return (0);
1542 }
1543
1544 static int
1545 alloc_fwq(struct adapter *sc)
1546 {
1547 int rc, intr_idx;
1548 struct sge_iq *fwq = &sc->sge.fwq;
1549
1550 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE);
1551 fwq->flags |= IQ_INTR; /* always */
1552 intr_idx = sc->intr_count > 1 ? 1 : 0;
1553 rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
1554 if (rc != 0) {
1555 cxgb_printf(sc->dip, CE_WARN,
1556 "failed to create firmware event queue: %d.", rc);
1557 return (rc);
1558 }
1559
1560 return (0);
1561 }
1562
1563 static int
1564 free_fwq(struct adapter *sc)
1565 {
1566
1567 return (free_iq_fl(NULL, &sc->sge.fwq, NULL));
1568 }
1569
1570 #ifdef TCP_OFFLOAD_ENABLE
1571 static int
1572 alloc_mgmtq(struct adapter *sc)
1573 {
1574 int rc;
1575 struct sge_wrq *mgmtq = &sc->sge.mgmtq;
1576
1577 init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
1578 sc->sge.fwq.cntxt_id);
1579 rc = alloc_wrq(sc, NULL, mgmtq, 0);
1580 if (rc != 0) {
1581 cxgb_printf(sc->dip, CE_WARN,
1582 "failed to create management queue: %d\n", rc);
1583 return (rc);
1584 }
1585
1586 return (0);
1587 }
1588 #endif
1589
1590 static int
1591 alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int i)
1592 {
1593 int rc;
1594
1595 rxq->port = pi;
1596 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx,
1597 t4_get_tp_ch_map(pi->adapter, pi->tx_chan));
1598 if (rc != 0)
1599 return (rc);
1600
1601 rxq->ksp = setup_rxq_kstats(pi, rxq, i);
1602
1603 return (rc);
1604 }
1605
1606 static int
1607 free_rxq(struct port_info *pi, struct sge_rxq *rxq)
1608 {
1609 int rc;
1610
1611 if (rxq->ksp != NULL) {
1612 kstat_delete(rxq->ksp);
1613 rxq->ksp = NULL;
1614 }
1615
1616 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
1617 if (rc == 0)
1618 bzero(&rxq->fl, sizeof (*rxq) - offsetof(struct sge_rxq, fl));
1619
1620 return (rc);
1621 }
1622
1623 #ifdef TCP_OFFLOAD_ENABLE
1624 static int
1625 alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
1626 int intr_idx)
1627 {
1628 int rc;
1629
1630 rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
1631 t4_get_tp_ch_map(pi->adapter, pi->tx_chan));
1632 if (rc != 0)
1633 return (rc);
1634
1635 return (rc);
1636 }
1637
1638 static int
1639 free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
1640 {
1641 int rc;
1642
1643 rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
1644 if (rc == 0)
1645 bzero(&ofld_rxq->fl, sizeof (*ofld_rxq) -
1646 offsetof(struct sge_ofld_rxq, fl));
1647
1648 return (rc);
1649 }
1650 #endif
1651
1652 static int
1653 ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
1654 {
1655 int rc, cntxt_id;
1656 struct fw_eq_ctrl_cmd c;
1657
1658 bzero(&c, sizeof (c));
1659
1660 c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
1661 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
1662 V_FW_EQ_CTRL_CMD_VFN(0));
1663 c.alloc_to_len16 = BE_32(F_FW_EQ_CTRL_CMD_ALLOC |
1664 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
1665 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* TODO */
1666 c.physeqid_pkd = BE_32(0);
1667 c.fetchszm_to_iqid =
1668 BE_32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1669 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
1670 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
1671 c.dcaen_to_eqsize =
1672 BE_32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1673 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1674 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1675 V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
1676 c.eqaddr = BE_64(eq->ba);
1677
1678 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
1679 if (rc != 0) {
1680 cxgb_printf(sc->dip, CE_WARN,
1681 "failed to create control queue %d: %d", eq->tx_chan, rc);
1682 return (rc);
1683 }
1684 eq->flags |= EQ_ALLOCATED;
1685
1686 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(BE_32(c.cmpliqid_eqid));
1687 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1688 if (cntxt_id >= sc->sge.neq)
1689 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1690 cntxt_id, sc->sge.neq - 1);
1691 sc->sge.eqmap[cntxt_id] = eq;
1692
1693 return (rc);
1694 }
1695
1696 static int
1697 eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
1698 {
1699 int rc, cntxt_id;
1700 struct fw_eq_eth_cmd c;
1701
1702 bzero(&c, sizeof (c));
1703
1704 c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
1705 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
1706 V_FW_EQ_ETH_CMD_VFN(0));
1707 c.alloc_to_len16 = BE_32(F_FW_EQ_ETH_CMD_ALLOC |
1708 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
1709 c.autoequiqe_to_viid = BE_32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
1710 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid));
1711 c.fetchszm_to_iqid =
1712 BE_32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1713 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
1714 V_FW_EQ_ETH_CMD_IQID(eq->iqid));
1715 c.dcaen_to_eqsize = BE_32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1716 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1717 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1718 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
1719 c.eqaddr = BE_64(eq->ba);
1720
1721 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
1722 if (rc != 0) {
1723 cxgb_printf(pi->dip, CE_WARN,
1724 "failed to create Ethernet egress queue: %d", rc);
1725 return (rc);
1726 }
1727 eq->flags |= EQ_ALLOCATED;
1728
1729 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(BE_32(c.eqid_pkd));
1730 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1731 if (cntxt_id >= sc->sge.neq)
1732 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1733 cntxt_id, sc->sge.neq - 1);
1734 sc->sge.eqmap[cntxt_id] = eq;
1735
1736 return (rc);
1737 }
1738
1739 #ifdef TCP_OFFLOAD_ENABLE
1740 static int
1741 ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
1742 {
1743 int rc, cntxt_id;
1744 struct fw_eq_ofld_cmd c;
1745
1746 bzero(&c, sizeof (c));
1747
1748 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
1749 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
1750 V_FW_EQ_OFLD_CMD_VFN(0));
1751 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
1752 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
1753 c.fetchszm_to_iqid =
1754 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
1755 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
1756 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
1757 c.dcaen_to_eqsize =
1758 BE_32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1759 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1760 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
1761 V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
1762 c.eqaddr = BE_64(eq->ba);
1763
1764 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
1765 if (rc != 0) {
1766 cxgb_printf(pi->dip, CE_WARN,
1767 "failed to create egress queue for TCP offload: %d", rc);
1768 return (rc);
1769 }
1770 eq->flags |= EQ_ALLOCATED;
1771
1772 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(BE_32(c.eqid_pkd));
1773 cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1774 if (cntxt_id >= sc->sge.neq)
1775 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1776 cntxt_id, sc->sge.neq - 1);
1777 sc->sge.eqmap[cntxt_id] = eq;
1778
1779 return (rc);
1780 }
1781 #endif
1782
1783 static int
1784 alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
1785 {
1786 int rc;
1787 size_t len;
1788
1789 mutex_init(&eq->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(sc->intr_pri));
1790 eq->flags |= EQ_MTX;
1791
1792 len = eq->qsize * EQ_ESIZE;
1793 rc = alloc_desc_ring(sc, len, DDI_DMA_WRITE, &eq->desc_dhdl,
1794 &eq->desc_ahdl, &eq->ba, (caddr_t *)&eq->desc);
1795 if (rc != 0)
1796 return (rc);
1797
1798 eq->cap = eq->qsize - sc->sge.stat_len / EQ_ESIZE;
1799 eq->spg = (void *)&eq->desc[eq->cap];
1800 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */
1801 eq->pidx = eq->cidx = 0;
1802 eq->doorbells = sc->doorbells;
1803
1804 switch (eq->flags & EQ_TYPEMASK) {
1805 case EQ_CTRL:
1806 rc = ctrl_eq_alloc(sc, eq);
1807 break;
1808
1809 case EQ_ETH:
1810 rc = eth_eq_alloc(sc, pi, eq);
1811 break;
1812
1813 #ifdef TCP_OFFLOAD_ENABLE
1814 case EQ_OFLD:
1815 rc = ofld_eq_alloc(sc, pi, eq);
1816 break;
1817 #endif
1818
1819 default:
1820 panic("%s: invalid eq type %d.", __func__,
1821 eq->flags & EQ_TYPEMASK);
1822 }
1823
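/*
 * If user doorbells are in use, compute this queue's doorbell address.
 * When the queue's segment does not fit within its doorbell page,
 * write-combined doorbells (DOORBELL_WCWR) are disabled and udb_qid is
 * left non-zero; otherwise udb points directly at the queue's segment
 * and udb_qid is cleared.
 */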
1824 if (eq->doorbells &
1825 (DOORBELL_UDB | DOORBELL_UDBWC | DOORBELL_WCWR)) {
1826 uint32_t s_qpp = sc->sge.s_qpp;
1827 uint32_t mask = (1 << s_qpp) - 1;
1828 volatile uint8_t *udb;
1829
1830 udb = (volatile uint8_t *)sc->reg1p + UDBS_DB_OFFSET;
1831 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */
1832 eq->udb_qid = eq->cntxt_id & mask; /* id in page */
1833 if (eq->udb_qid > PAGE_SIZE / UDBS_SEG_SIZE)
1834 eq->doorbells &= ~DOORBELL_WCWR;
1835 else {
1836 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */
1837 eq->udb_qid = 0;
1838 }
1839 eq->udb = (volatile void *)udb;
1840 }
1841
1842 if (rc != 0) {
1843 cxgb_printf(sc->dip, CE_WARN,
1844 "failed to allocate egress queue(%d): %d",
1845 eq->flags & EQ_TYPEMASK, rc);
1846 }
1847
1848 return (rc);
1849 }
1850
1851 static int
1852 free_eq(struct adapter *sc, struct sge_eq *eq)
1853 {
1854 int rc;
1855
1856 if (eq->flags & EQ_ALLOCATED) {
1857 switch (eq->flags & EQ_TYPEMASK) {
1858 case EQ_CTRL:
1859 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
1860 eq->cntxt_id);
1861 break;
1862
1863 case EQ_ETH:
1864 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
1865 eq->cntxt_id);
1866 break;
1867
1868 #ifdef TCP_OFFLOAD_ENABLE
1869 case EQ_OFLD:
1870 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
1871 eq->cntxt_id);
1872 break;
1873 #endif
1874
1875 default:
1876 panic("%s: invalid eq type %d.", __func__,
1877 eq->flags & EQ_TYPEMASK);
1878 }
1879 if (rc != 0) {
1880 cxgb_printf(sc->dip, CE_WARN,
1881 "failed to free egress queue (%d): %d",
1882 eq->flags & EQ_TYPEMASK, rc);
1883 return (rc);
1884 }
1885 eq->flags &= ~EQ_ALLOCATED;
1886 }
1887
1888 if (eq->desc != NULL) {
1889 (void) free_desc_ring(&eq->desc_dhdl, &eq->desc_ahdl);
1890 eq->desc = NULL;
1891 }
1892
1893 if (eq->flags & EQ_MTX)
1894 mutex_destroy(&eq->lock);
1895
1896 bzero(eq, sizeof (*eq));
1897 return (0);
1898 }
1899
1900 #ifdef TCP_OFFLOAD_ENABLE
1901 /* ARGSUSED */
1902 static int
1903 alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
1904 int idx)
1905 {
1906 int rc;
1907
1908 rc = alloc_eq(sc, pi, &wrq->eq);
1909 if (rc != 0)
1910 return (rc);
1911
1912 wrq->adapter = sc;
1913 wrq->wr_list.head = NULL;
1914 wrq->wr_list.tail = NULL;
1915
1916 /*
1917 * TODO: use idx to figure out what kind of wrq this is and install
1918 * useful kstats for it.
1919 */
1920
1921 return (rc);
1922 }
1923
1924 static int
1925 free_wrq(struct adapter *sc, struct sge_wrq *wrq)
1926 {
1927 int rc;
1928
1929 rc = free_eq(sc, &wrq->eq);
1930 if (rc != 0)
1931 return (rc);
1932
1933 bzero(wrq, sizeof (*wrq));
1934 return (0);
1935 }
1936 #endif
1937
1938 static int
1939 alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
1940 {
1941 int rc, i;
1942 struct adapter *sc = pi->adapter;
1943 struct sge_eq *eq = &txq->eq;
1944
1945 rc = alloc_eq(sc, pi, eq);
1946 if (rc != 0)
1947 return (rc);
1948
1949 txq->port = pi;
1950 txq->sdesc = kmem_zalloc(sizeof (struct tx_sdesc) * eq->cap, KM_SLEEP);
1951 txq->txb_size = eq->qsize * tx_copy_threshold;
1952 rc = alloc_tx_copybuffer(sc, txq->txb_size, &txq->txb_dhdl,
1953 &txq->txb_ahdl, &txq->txb_ba, &txq->txb_va);
1954 if (rc == 0)
1955 txq->txb_avail = txq->txb_size;
1956 else
1957 txq->txb_avail = txq->txb_size = 0;
1958
1959 /*
1960 * TODO: is this too low? Worst case would need around 4 times qsize
1961 * (all tx descriptors filled to the brim with SGLs, with each entry in
1962 * the SGL coming from a distinct DMA handle). Increase tx_dhdl_total
1963 * if you see too many dma_hdl_failed.
1964 */
1965 txq->tx_dhdl_total = eq->qsize * 2;
1966 txq->tx_dhdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
1967 txq->tx_dhdl_total, KM_SLEEP);
1968 for (i = 0; i < txq->tx_dhdl_total; i++) {
1969 rc = ddi_dma_alloc_handle(sc->dip, &sc->sge.dma_attr_tx,
1970 DDI_DMA_SLEEP, 0, &txq->tx_dhdl[i]);
1971 if (rc != DDI_SUCCESS) {
1972 cxgb_printf(sc->dip, CE_WARN,
1973 "%s: failed to allocate DMA handle (%d)",
1974 __func__, rc);
1975 return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EINVAL);
1976 }
1977 txq->tx_dhdl_avail++;
1978 }
1979
1980 txq->ksp = setup_txq_kstats(pi, txq, idx);
1981
1982 return (rc);
1983 }
1984
1985 static int
1986 free_txq(struct port_info *pi, struct sge_txq *txq)
1987 {
1988 int i;
1989 struct adapter *sc = pi->adapter;
1990 struct sge_eq *eq = &txq->eq;
1991
1992 if (txq->ksp != NULL) {
1993 kstat_delete(txq->ksp);
1994 txq->ksp = NULL;
1995 }
1996
1997 if (txq->txb_va != NULL) {
1998 (void) free_desc_ring(&txq->txb_dhdl, &txq->txb_ahdl);
1999 txq->txb_va = NULL;
2000 }
2001
2002 if (txq->sdesc != NULL) {
2003 struct tx_sdesc *sd;
2004 ddi_dma_handle_t hdl;
2005
2006 TXQ_LOCK(txq);
2007 while (eq->cidx != eq->pidx) {
2008 sd = &txq->sdesc[eq->cidx];
2009
2010 for (i = sd->hdls_used; i; i--) {
2011 hdl = txq->tx_dhdl[txq->tx_dhdl_cidx];
2012 (void) ddi_dma_unbind_handle(hdl);
2013 if (++txq->tx_dhdl_cidx == txq->tx_dhdl_total)
2014 txq->tx_dhdl_cidx = 0;
2015 }
2016
2017 ASSERT(sd->m);
2018 freemsgchain(sd->m);
2019
2020 eq->cidx += sd->desc_used;
2021 if (eq->cidx >= eq->cap)
2022 eq->cidx -= eq->cap;
2023
2024 			txq->txb_avail += sd->txb_used;
2025 }
2026 ASSERT(txq->tx_dhdl_cidx == txq->tx_dhdl_pidx);
2027 ASSERT(txq->txb_avail == txq->txb_size);
2028 TXQ_UNLOCK(txq);
2029
2030 kmem_free(txq->sdesc, sizeof (struct tx_sdesc) * eq->cap);
2031 txq->sdesc = NULL;
2032 }
2033
2034 if (txq->tx_dhdl != NULL) {
2035 for (i = 0; i < txq->tx_dhdl_total; i++) {
2036 if (txq->tx_dhdl[i] != NULL)
2037 ddi_dma_free_handle(&txq->tx_dhdl[i]);
2038 }
2039 }
2040
2041 (void) free_eq(sc, &txq->eq);
2042
2043 bzero(txq, sizeof (*txq));
2044 return (0);
2045 }
2046
2047 /*
2048 * Allocates a block of contiguous memory for DMA. Can be used to allocate
2049 * memory for descriptor rings or for tx/rx copy buffers.
2050 *
2051  * The caller does not have to clean up anything if this function fails; it
2052  * cleans up after itself.
2053 *
2054 * Caller provides the following:
2055 * len length of the block of memory to allocate.
2056 * flags DDI_DMA_* flags to use (CONSISTENT/STREAMING, READ/WRITE/RDWR)
2057 * acc_attr device access attributes for the allocation.
2058 * dma_attr DMA attributes for the allocation
2059 *
2060  * If the function is successful, it fills in the following:
2061 * dma_hdl DMA handle for the allocated memory
2062 * acc_hdl access handle for the allocated memory
2063 * ba bus address of the allocated memory
2064 * va KVA of the allocated memory.
2065 */
2066 static int
2067 alloc_dma_memory(struct adapter *sc, size_t len, int flags,
2068 ddi_device_acc_attr_t *acc_attr, ddi_dma_attr_t *dma_attr,
2069 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
2070 uint64_t *pba, caddr_t *pva)
2071 {
2072 int rc;
2073 ddi_dma_handle_t dhdl;
2074 ddi_acc_handle_t ahdl;
2075 ddi_dma_cookie_t cookie;
2076 uint_t ccount;
2077 caddr_t va;
2078 size_t real_len;
2079
2080 *pva = NULL;
2081
2082 /*
2083 * DMA handle.
2084 */
2085 rc = ddi_dma_alloc_handle(sc->dip, dma_attr, DDI_DMA_SLEEP, 0, &dhdl);
2086 if (rc != DDI_SUCCESS) {
2087 cxgb_printf(sc->dip, CE_WARN,
2088 "failed to allocate DMA handle: %d", rc);
2089
2090 return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EINVAL);
2091 }
2092
2093 /*
2094 * Memory suitable for DMA.
2095 */
2096 rc = ddi_dma_mem_alloc(dhdl, len, acc_attr,
2097 flags & DDI_DMA_CONSISTENT ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
2098 DDI_DMA_SLEEP, 0, &va, &real_len, &ahdl);
2099 if (rc != DDI_SUCCESS) {
2100 cxgb_printf(sc->dip, CE_WARN,
2101 "failed to allocate DMA memory: %d", rc);
2102
2103 ddi_dma_free_handle(&dhdl);
2104 return (ENOMEM);
2105 }
2106
2107 if (len != real_len) {
2108 cxgb_printf(sc->dip, CE_WARN,
2109 		    "%s: len (%u) != real_len (%u)\n", __func__,
		    (uint_t)len, (uint_t)real_len);
2110 }
2111
2112 /*
2113 * DMA bindings.
2114 */
2115 rc = ddi_dma_addr_bind_handle(dhdl, NULL, va, real_len, flags, NULL,
2116 NULL, &cookie, &ccount);
2117 if (rc != DDI_DMA_MAPPED) {
2118 cxgb_printf(sc->dip, CE_WARN,
2119 "failed to map DMA memory: %d", rc);
2120
2121 ddi_dma_mem_free(&ahdl);
2122 ddi_dma_free_handle(&dhdl);
2123 return (ENOMEM);
2124 }
2125 if (ccount != 1) {
2126 cxgb_printf(sc->dip, CE_WARN,
2127 "unusable DMA mapping (%d segments)", ccount);
2128 		(void) free_desc_ring(&dhdl, &ahdl);
		return (ENOMEM);
2129 }
2130
2131 bzero(va, real_len);
2132 *dma_hdl = dhdl;
2133 *acc_hdl = ahdl;
2134 *pba = cookie.dmac_laddress;
2135 *pva = va;
2136
2137 return (0);
2138 }
2139
2140 static int
2141 free_dma_memory(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl)
2142 {
2143 (void) ddi_dma_unbind_handle(*dhdl);
2144 ddi_dma_mem_free(ahdl);
2145 ddi_dma_free_handle(dhdl);
2146
2147 return (0);
2148 }
2149
2150 static int
2151 alloc_desc_ring(struct adapter *sc, size_t len, int rw,
2152 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
2153 uint64_t *pba, caddr_t *pva)
2154 {
2155 ddi_device_acc_attr_t *acc_attr = &sc->sge.acc_attr_desc;
2156 ddi_dma_attr_t *dma_attr = &sc->sge.dma_attr_desc;
2157
2158 return (alloc_dma_memory(sc, len, DDI_DMA_CONSISTENT | rw, acc_attr,
2159 dma_attr, dma_hdl, acc_hdl, pba, pva));
2160 }
2161
2162 static int
2163 free_desc_ring(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl)
2164 {
2165 return (free_dma_memory(dhdl, ahdl));
2166 }
2167
2168 static int
2169 alloc_tx_copybuffer(struct adapter *sc, size_t len,
2170 ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
2171 uint64_t *pba, caddr_t *pva)
2172 {
2173 ddi_device_acc_attr_t *acc_attr = &sc->sge.acc_attr_tx;
2174 ddi_dma_attr_t *dma_attr = &sc->sge.dma_attr_desc; /* NOT dma_attr_tx */
2175
2176 return (alloc_dma_memory(sc, len, DDI_DMA_STREAMING | DDI_DMA_WRITE,
2177 acc_attr, dma_attr, dma_hdl, acc_hdl, pba, pva));
2178 }
2179
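/*
 * Response entries carry a generation bit; the driver's copy (iq->gen) is
 * flipped each time cidx wraps in iq_next() below, so an entry counts as
 * "new" only while its generation matches the driver's. A consumer such as
 * service_iq() can therefore poll roughly along these lines (a sketch, not
 * verbatim driver code):
 *
 *	struct rsp_ctrl *ctrl;
 *
 *	while (is_new_response(iq, &ctrl)) {
 *		... process the entry at iq->cdesc ...
 *		iq_next(iq);
 *	}
 */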
2180 static inline bool
2181 is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
2182 {
2183 (void) ddi_dma_sync(iq->dhdl, (uintptr_t)iq->cdesc -
2184 (uintptr_t)iq->desc, iq->esize, DDI_DMA_SYNC_FORKERNEL);
2185
2186 *ctrl = (void *)((uintptr_t)iq->cdesc +
2187 (iq->esize - sizeof (struct rsp_ctrl)));
2188
2189 return ((((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen));
2190 }
2191
2192 static inline void
2193 iq_next(struct sge_iq *iq)
2194 {
2195 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
2196 if (++iq->cidx == iq->qsize - 1) {
2197 iq->cidx = 0;
2198 iq->gen ^= 1;
2199 iq->cdesc = iq->desc;
2200 }
2201 }
2202
2203 /*
2204  * Fill up the freelist with up to nbufs buffers and maybe ring its doorbell.
2205  *
2206  * Returns non-zero to indicate that the freelist should be added to the list
2207  * of starving freelists.
2208 */
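/*
 * Illustrative caller pattern (a sketch, not verbatim driver code): after
 * pulling fl_bufs_used buffers off a freelist while servicing an ingress
 * queue, a caller might top it back up like this:
 *
 *	int starved;
 *
 *	FL_LOCK(fl);
 *	fl->needed += fl_bufs_used;
 *	starved = refill_fl(sc, fl, 64);
 *	FL_UNLOCK(fl);
 *	if (starved != 0)
 *		add_fl_to_sfl(sc, fl);
 */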
2209 static int
2210 refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
2211 {
2212 uint64_t *d = &fl->desc[fl->pidx];
2213 struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
2214
2215 FL_LOCK_ASSERT_OWNED(fl);
2216 ASSERT(nbufs >= 0);
2217
2218 if (nbufs > fl->needed)
2219 nbufs = fl->needed;
2220
2221 while (nbufs--) {
2222 if (sd->rxb != NULL) {
2223 if (sd->rxb->ref_cnt == 1) {
2224 /*
2225 * Buffer is available for recycling. Two ways
2226 * this can happen:
2227 *
2228 * a) All the packets DMA'd into it last time
2229 * around were within the rx_copy_threshold
2230 * and no part of the buffer was ever passed
2231 * up (ref_cnt never went over 1).
2232 *
2233 * b) Packets DMA'd into the buffer were passed
2234 * up but have all been freed by the upper
2235 * layers by now (ref_cnt went over 1 but is
2236 * now back to 1).
2237 *
2238 * Either way the bus address in the descriptor
2239 * ring is already valid.
2240 */
2241 ASSERT(*d == cpu_to_be64(sd->rxb->ba));
2242 d++;
2243 goto recycled;
2244 } else {
2245 /*
2246 * Buffer still in use and we need a
2247 * replacement. But first release our reference
2248 * on the existing buffer.
2249 */
2250 rxbuf_free(sd->rxb);
2251 }
2252 }
2253
2254 sd->rxb = rxbuf_alloc(sc->sge.rxbuf_cache, KM_NOSLEEP, 1);
2255 if (sd->rxb == NULL)
2256 break;
2257 *d++ = cpu_to_be64(sd->rxb->ba);
2258
2259 recycled: fl->pending++;
2260 sd++;
2261 fl->needed--;
2262 if (++fl->pidx == fl->cap) {
2263 fl->pidx = 0;
2264 sd = fl->sdesc;
2265 d = fl->desc;
2266 }
2267 }
2268
2269 if (fl->pending >= 8)
2270 ring_fl_db(sc, fl);
2271
2272 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
2273 }
2274
2275 #ifndef TAILQ_FOREACH_SAFE
2276 #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
2277 for ((var) = TAILQ_FIRST((head)); \
2278 (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
2279 (var) = (tvar))
2280 #endif
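/*
 * The _SAFE variant latches the next element before the loop body runs, so
 * the body is free to TAILQ_REMOVE() the current element, which is exactly
 * what refill_sfl() below does once a freelist is no longer running low.
 */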
2281
2282 /*
2283 * Attempt to refill all starving freelists.
2284 */
2285 static void
2286 refill_sfl(void *arg)
2287 {
2288 struct adapter *sc = arg;
2289 struct sge_fl *fl, *fl_temp;
2290
2291 mutex_enter(&sc->sfl_lock);
2292 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
2293 FL_LOCK(fl);
2294 (void) refill_fl(sc, fl, 64);
2295 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
2296 TAILQ_REMOVE(&sc->sfl, fl, link);
2297 fl->flags &= ~FL_STARVING;
2298 }
2299 FL_UNLOCK(fl);
2300 }
2301
2302 	if (!TAILQ_EMPTY(&sc->sfl))
2303 sc->sfl_timer = timeout(refill_sfl, sc, drv_usectohz(100000));
2304 mutex_exit(&sc->sfl_lock);
2305 }
2306
2307 static void
2308 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
2309 {
2310 mutex_enter(&sc->sfl_lock);
2311 FL_LOCK(fl);
2312 if ((fl->flags & FL_DOOMED) == 0) {
2313 if (TAILQ_EMPTY(&sc->sfl) != 0) {
2314 sc->sfl_timer = timeout(refill_sfl, sc,
2315 drv_usectohz(100000));
2316 }
2317 fl->flags |= FL_STARVING;
2318 TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
2319 }
2320 FL_UNLOCK(fl);
2321 mutex_exit(&sc->sfl_lock);
2322 }
2323
2324 static void
2325 free_fl_bufs(struct sge_fl *fl)
2326 {
2327 struct fl_sdesc *sd;
2328 unsigned int i;
2329
2330 FL_LOCK_ASSERT_OWNED(fl);
2331
2332 for (i = 0; i < fl->cap; i++) {
2333 sd = &fl->sdesc[i];
2334
2335 if (sd->rxb != NULL) {
2336 rxbuf_free(sd->rxb);
2337 sd->rxb = NULL;
2338 }
2339 }
2340 }
2341
2342 /*
2343 * Note that fl->cidx and fl->offset are left unchanged in case of failure.
2344 */
2345 static mblk_t *
2346 get_fl_payload(struct adapter *sc, struct sge_fl *fl,
2347 uint32_t len_newbuf, int *fl_bufs_used)
2348 {
2349 struct mblk_pair frame = {0};
2350 struct rxbuf *rxb;
2351 mblk_t *m = NULL;
2352 uint_t nbuf = 0, len, copy, n;
2353 uint32_t cidx, offset, rcidx, roffset;
2354
2355 /*
2356 * The SGE won't pack a new frame into the current buffer if the entire
2357 * payload doesn't fit in the remaining space. Move on to the next buf
2358 * in that case.
2359 */
2360 rcidx = fl->cidx;
2361 roffset = fl->offset;
2362 if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
2363 fl->offset = 0;
2364 if (++fl->cidx == fl->cap)
2365 fl->cidx = 0;
2366 nbuf++;
2367 }
2368 cidx = fl->cidx;
2369 offset = fl->offset;
2370
2371 len = G_RSPD_LEN(len_newbuf); /* pktshift + payload length */
2372 copy = (len <= fl->copy_threshold);
2373 if (copy != 0) {
2374 frame.head = m = allocb(len, BPRI_HI);
2375 if (m == NULL) {
2376 fl->allocb_fail++;
2377 			cmn_err(CE_WARN, "%s: mblk allocation failure "
2378 "count = %llu", __func__,
2379 (unsigned long long)fl->allocb_fail);
2380 fl->cidx = rcidx;
2381 fl->offset = roffset;
2382 return (NULL);
2383 }
2384 }
2385
2386 while (len) {
2387 rxb = fl->sdesc[cidx].rxb;
2388 n = min(len, rxb->buf_size - offset);
2389
2390 (void) ddi_dma_sync(rxb->dhdl, offset, n,
2391 DDI_DMA_SYNC_FORKERNEL);
2392
2393 if (copy != 0)
2394 bcopy(rxb->va + offset, m->b_wptr, n);
2395 else {
2396 m = desballoc((unsigned char *)rxb->va + offset, n,
2397 BPRI_HI, &rxb->freefunc);
2398 if (m == NULL) {
2399 fl->allocb_fail++;
2400 cmn_err(CE_WARN,
2401 			    "%s: mblk allocation failure "
2402 "count = %llu", __func__,
2403 (unsigned long long)fl->allocb_fail);
2404 if (frame.head)
2405 freemsgchain(frame.head);
2406 fl->cidx = rcidx;
2407 fl->offset = roffset;
2408 return (NULL);
2409 }
2410 atomic_inc_uint(&rxb->ref_cnt);
2411 if (frame.head != NULL)
2412 frame.tail->b_cont = m;
2413 else
2414 frame.head = m;
2415 frame.tail = m;
2416 }
2417 m->b_wptr += n;
2418 len -= n;
2419 offset += roundup(n, sc->sge.fl_align);
2420 ASSERT(offset <= rxb->buf_size);
2421 if (offset == rxb->buf_size) {
2422 offset = 0;
2423 if (++cidx == fl->cap)
2424 cidx = 0;
2425 nbuf++;
2426 }
2427 }
2428
2429 fl->cidx = cidx;
2430 fl->offset = offset;
2431 (*fl_bufs_used) += nbuf;
2432
2433 ASSERT(frame.head != NULL);
2434 return (frame.head);
2435 }
2436
2437 /*
2438 * We'll do immediate data tx for non-LSO, but only when not coalescing. We're
2439  * willing to use up to 2 hardware descriptors, which means a maximum of 96 bytes
2440 * of immediate data.
2441 */
2442 #define IMM_LEN ( \
2443 2 * EQ_ESIZE \
2444 - sizeof (struct fw_eth_tx_pkt_wr) \
2445 - sizeof (struct cpl_tx_pkt_core))
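/*
 * Sanity check on the arithmetic above, assuming the usual 16-byte
 * fw_eth_tx_pkt_wr and 16-byte cpl_tx_pkt_core headers: two 64-byte
 * hardware descriptors (EQ_ESIZE) hold 128 bytes, and 128 - 16 - 16 leaves
 * the 96 bytes of immediate data mentioned in the comment.
 */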
2446
2447 /*
2448  * Returns non-zero on failure; no need to clean up anything in that case.
2449 *
2450 * Note 1: We always try to pull up the mblk if required and return E2BIG only
2451 * if this fails.
2452 *
2453  * Note 2: We'll also pull up the incoming mblk if HW_LSO is set and the first
2454  * mblk does not have the TCP header in it.
2455 */
2456 static int
2457 get_frame_txinfo(struct sge_txq *txq, mblk_t **fp, struct txinfo *txinfo,
2458 int sgl_only)
2459 {
2460 uint32_t flags = 0, len, n;
2461 mblk_t *m = *fp;
2462 int rc;
2463
2464 TXQ_LOCK_ASSERT_OWNED(txq); /* will manipulate txb and dma_hdls */
2465
2466 mac_hcksum_get(m, NULL, NULL, NULL, NULL, &flags);
2467 txinfo->flags = flags;
2468
2469 mac_lso_get(m, &txinfo->mss, &flags);
2470 txinfo->flags |= flags;
2471
2472 if (flags & HW_LSO)
2473 sgl_only = 1; /* Do not allow immediate data with LSO */
2474
2475 start: txinfo->nsegs = 0;
2476 txinfo->hdls_used = 0;
2477 txinfo->txb_used = 0;
2478 txinfo->len = 0;
2479
2480 /* total length and a rough estimate of # of segments */
2481 n = 0;
2482 for (; m; m = m->b_cont) {
2483 len = MBLKL(m);
2484 n += (len / PAGE_SIZE) + 1;
2485 txinfo->len += len;
2486 }
2487 m = *fp;
2488
2489 if (n >= TX_SGL_SEGS || (flags & HW_LSO && MBLKL(m) < 50)) {
2490 txq->pullup_early++;
2491 m = msgpullup(*fp, -1);
2492 if (m == NULL) {
2493 txq->pullup_failed++;
2494 return (E2BIG); /* (*fp) left as it was */
2495 }
2496 freemsg(*fp);
2497 *fp = m;
2498 mac_hcksum_set(m, 0, 0, 0, 0, txinfo->flags);
2499 }
2500
2501 if (txinfo->len <= IMM_LEN && !sgl_only)
2502 return (0); /* nsegs = 0 tells caller to use imm. tx */
2503
2504 if (txinfo->len <= txq->copy_threshold &&
2505 copy_into_txb(txq, m, txinfo->len, txinfo) == 0)
2506 goto done;
2507
2508 for (; m; m = m->b_cont) {
2509
2510 len = MBLKL(m);
2511
2512 /* Use tx copy buffer if this mblk is small enough */
2513 if (len <= txq->copy_threshold &&
2514 copy_into_txb(txq, m, len, txinfo) == 0)
2515 continue;
2516
2517 /* Add DMA bindings for this mblk to the SGL */
2518 rc = add_mblk(txq, txinfo, m, len);
2519
2520 if (rc == E2BIG ||
2521 (txinfo->nsegs == TX_SGL_SEGS && m->b_cont)) {
2522
2523 txq->pullup_late++;
2524 m = msgpullup(*fp, -1);
2525 if (m != NULL) {
2526 free_txinfo_resources(txq, txinfo);
2527 freemsg(*fp);
2528 *fp = m;
2529 mac_hcksum_set(m, 0, 0, 0, 0, txinfo->flags);
2530 goto start;
2531 }
2532
2533 txq->pullup_failed++;
2534 rc = E2BIG;
2535 }
2536
2537 if (rc != 0) {
2538 free_txinfo_resources(txq, txinfo);
2539 return (rc);
2540 }
2541 }
2542
2543 ASSERT(txinfo->nsegs > 0 && txinfo->nsegs <= TX_SGL_SEGS);
2544
2545 done:
2546
2547 /*
2548 * Store the # of flits required to hold this frame's SGL in nflits. An
2549 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
2550 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used
2551 * then len1 must be set to 0.
2552 */
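	/*
	 * Worked example: nsegs = 3 gives n = 2 and nflits = 3 + 0 + 2 = 5
	 * (one flit for cmd_nsge + len0, one for addr0, then three for the
	 * (len1|len2, addr1, addr2) pair); nsegs = 4 gives n = 3 and
	 * nflits = 4 + 1 + 2 = 7, the extra pair of flits covering the odd
	 * trailing segment whose unused length slot is zeroed just below.
	 */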
2553 n = txinfo->nsegs - 1;
2554 txinfo->nflits = (3 * n) / 2 + (n & 1) + 2;
2555 if (n & 1)
2556 txinfo->sgl.sge[n / 2].len[1] = cpu_to_be32(0);
2557
2558 txinfo->sgl.cmd_nsge = cpu_to_be32(V_ULPTX_CMD((u32)ULP_TX_SC_DSGL) |
2559 V_ULPTX_NSGE(txinfo->nsegs));
2560
2561 return (0);
2562 }
2563
2564 static inline int
2565 fits_in_txb(struct sge_txq *txq, int len, int *waste)
2566 {
2567 if (txq->txb_avail < len)
2568 return (0);
2569
2570 if (txq->txb_next + len <= txq->txb_size) {
2571 *waste = 0;
2572 return (1);
2573 }
2574
2575 *waste = txq->txb_size - txq->txb_next;
2576
2577 return (txq->txb_avail - *waste < len ? 0 : 1);
2578 }
2579
2580 #define TXB_CHUNK 64
2581
2582 /*
2583 * Copies the specified # of bytes into txq's tx copy buffer and updates txinfo
2584  * and txq to indicate resources used. The caller must ensure that at least
2585  * that many bytes are available in the mblk chain (b_cont linked).
2586 */
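/*
 * Copy buffer space is accounted in TXB_CHUNK (64-byte) units. For example,
 * a 100-byte copy charges roundup(100, 64) = 128 bytes against txb_avail,
 * and if only 64 bytes remain before the end of the buffer, those 64 bytes
 * are written off as waste and the copy starts over at offset 0.
 */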
2587 static inline int
2588 copy_into_txb(struct sge_txq *txq, mblk_t *m, int len, struct txinfo *txinfo)
2589 {
2590 int waste, n;
2591
2592 TXQ_LOCK_ASSERT_OWNED(txq); /* will manipulate txb */
2593
2594 if (!fits_in_txb(txq, len, &waste)) {
2595 txq->txb_full++;
2596 return (ENOMEM);
2597 }
2598
2599 if (waste != 0) {
2600 ASSERT((waste & (TXB_CHUNK - 1)) == 0);
2601 txinfo->txb_used += waste;
2602 txq->txb_avail -= waste;
2603 txq->txb_next = 0;
2604 }
2605
2606 for (n = 0; n < len; m = m->b_cont) {
2607 bcopy(m->b_rptr, txq->txb_va + txq->txb_next + n, MBLKL(m));
2608 n += MBLKL(m);
2609 }
2610
2611 add_seg(txinfo, txq->txb_ba + txq->txb_next, len);
2612
2613 n = roundup(len, TXB_CHUNK);
2614 txinfo->txb_used += n;
2615 txq->txb_avail -= n;
2616 txq->txb_next += n;
2617 ASSERT(txq->txb_next <= txq->txb_size);
2618 if (txq->txb_next == txq->txb_size)
2619 txq->txb_next = 0;
2620
2621 return (0);
2622 }
2623
2624 static inline void
2625 add_seg(struct txinfo *txinfo, uint64_t ba, uint32_t len)
2626 {
2627 ASSERT(txinfo->nsegs < TX_SGL_SEGS); /* must have room */
2628
2629 if (txinfo->nsegs != 0) {
2630 int idx = txinfo->nsegs - 1;
2631 txinfo->sgl.sge[idx / 2].len[idx & 1] = cpu_to_be32(len);
2632 txinfo->sgl.sge[idx / 2].addr[idx & 1] = cpu_to_be64(ba);
2633 } else {
2634 txinfo->sgl.len0 = cpu_to_be32(len);
2635 txinfo->sgl.addr0 = cpu_to_be64(ba);
2636 }
2637 txinfo->nsegs++;
2638 }
2639
2640 /*
2641 * This function cleans up any partially allocated resources when it fails so
2642 * there's nothing for the caller to clean up in that case.
2643 *
2644 * EIO indicates permanent failure. Caller should drop the frame containing
2645 * this mblk and continue.
2646 *
2647 * E2BIG indicates that the SGL length for this mblk exceeds the hardware
2648 * limit. Caller should pull up the frame before trying to send it out.
2649 * (This error means our pullup_early heuristic did not work for this frame)
2650 *
2651 * ENOMEM indicates a temporary shortage of resources (DMA handles, other DMA
2652 * resources, etc.). Caller should suspend the tx queue and wait for reclaim to
2653 * free up resources.
2654 */
2655 static inline int
2656 add_mblk(struct sge_txq *txq, struct txinfo *txinfo, mblk_t *m, int len)
2657 {
2658 ddi_dma_handle_t dhdl;
2659 ddi_dma_cookie_t cookie;
2660 uint_t ccount = 0;
2661 int rc;
2662
2663 TXQ_LOCK_ASSERT_OWNED(txq); /* will manipulate dhdls */
2664
2665 if (txq->tx_dhdl_avail == 0) {
2666 txq->dma_hdl_failed++;
2667 return (ENOMEM);
2668 }
2669
2670 dhdl = txq->tx_dhdl[txq->tx_dhdl_pidx];
2671 rc = ddi_dma_addr_bind_handle(dhdl, NULL, (caddr_t)m->b_rptr, len,
2672 DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &cookie,
2673 &ccount);
2674 if (rc != DDI_DMA_MAPPED) {
2675 txq->dma_map_failed++;
2676
2677 ASSERT(rc != DDI_DMA_INUSE && rc != DDI_DMA_PARTIAL_MAP);
2678
2679 return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EIO);
2680 }
2681
2682 if (ccount + txinfo->nsegs > TX_SGL_SEGS) {
2683 (void) ddi_dma_unbind_handle(dhdl);
2684 return (E2BIG);
2685 }
2686
2687 add_seg(txinfo, cookie.dmac_laddress, cookie.dmac_size);
2688 while (--ccount) {
2689 ddi_dma_nextcookie(dhdl, &cookie);
2690 add_seg(txinfo, cookie.dmac_laddress, cookie.dmac_size);
2691 }
2692
2693 if (++txq->tx_dhdl_pidx == txq->tx_dhdl_total)
2694 txq->tx_dhdl_pidx = 0;
2695 txq->tx_dhdl_avail--;
2696 txinfo->hdls_used++;
2697
2698 return (0);
2699 }
2700
2701 /*
2702 * Releases all the txq resources used up in the specified txinfo.
2703 */
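/*
 * The copy-buffer rollback below has to cope with wrap-around: for example,
 * if txb_used is 192 but txb_next has already wrapped around to 64, the
 * first 64 bytes are taken back from the front of the buffer and the
 * remaining 128 from its tail, leaving txb_next at txb_size - 128. DMA
 * handles are likewise returned by walking tx_dhdl_pidx backwards and
 * unbinding each handle.
 */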
2704 static void
2705 free_txinfo_resources(struct sge_txq *txq, struct txinfo *txinfo)
2706 {
2707 int n;
2708
2709 TXQ_LOCK_ASSERT_OWNED(txq); /* dhdls, txb */
2710
2711 n = txinfo->txb_used;
2712 if (n > 0) {
2713 txq->txb_avail += n;
2714 if (n <= txq->txb_next)
2715 txq->txb_next -= n;
2716 else {
2717 n -= txq->txb_next;
2718 txq->txb_next = txq->txb_size - n;
2719 }
2720 }
2721
2722 for (n = txinfo->hdls_used; n > 0; n--) {
2723 if (txq->tx_dhdl_pidx > 0)
2724 txq->tx_dhdl_pidx--;
2725 else
2726 txq->tx_dhdl_pidx = txq->tx_dhdl_total - 1;
2727 txq->tx_dhdl_avail++;
2728 (void) ddi_dma_unbind_handle(txq->tx_dhdl[txq->tx_dhdl_pidx]);
2729 }
2730 }
2731
2732 /*
2733 * Returns 0 to indicate that m has been accepted into a coalesced tx work
2734 * request. It has either been folded into txpkts or txpkts was flushed and m
2735 * has started a new coalesced work request (as the first frame in a fresh
2736 * txpkts).
2737 *
2738  * Returns non-zero to indicate a failure; the caller is then responsible for
2739  * transmitting m. If there was anything in txpkts, it has been flushed.
2740 */
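/*
 * Illustrative caller pattern (a sketch, not verbatim driver code): the tx
 * path would typically try to coalesce first and fall back to a standalone
 * work request if that fails:
 *
 *	if (add_to_txpkts(txq, &txpkts, m, &txinfo) != 0) {
 *		if (write_txpkt_wr(pi, txq, m, &txinfo) != 0) {
 *			... out of descriptors: stop and wait for reclaim ...
 *		}
 *	}
 */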
2741 static int
2742 add_to_txpkts(struct sge_txq *txq, struct txpkts *txpkts, mblk_t *m,
2743 struct txinfo *txinfo)
2744 {
2745 struct sge_eq *eq = &txq->eq;
2746 int can_coalesce;
2747 struct tx_sdesc *txsd;
2748 uint8_t flits;
2749
2750 TXQ_LOCK_ASSERT_OWNED(txq);
2751
2752 if (txpkts->npkt > 0) {
2753 flits = TXPKTS_PKT_HDR + txinfo->nflits;
2754 can_coalesce = (txinfo->flags & HW_LSO) == 0 &&
2755 txpkts->nflits + flits <= TX_WR_FLITS &&
2756 txpkts->nflits + flits <= eq->avail * 8 &&
2757 txpkts->plen + txinfo->len < 65536;
2758
2759 if (can_coalesce != 0) {
2760 txpkts->tail->b_next = m;
2761 txpkts->tail = m;
2762 txpkts->npkt++;
2763 txpkts->nflits += flits;
2764 txpkts->plen += txinfo->len;
2765
2766 txsd = &txq->sdesc[eq->pidx];
2767 txsd->txb_used += txinfo->txb_used;
2768 txsd->hdls_used += txinfo->hdls_used;
2769
2770 return (0);
2771 }
2772
2773 /*
2774 * Couldn't coalesce m into txpkts. The first order of business
2775 * is to send txpkts on its way. Then we'll revisit m.
2776 */
2777 write_txpkts_wr(txq, txpkts);
2778 }
2779
2780 /*
2781 * Check if we can start a new coalesced tx work request with m as
2782 * the first packet in it.
2783 */
2784
2785 ASSERT(txpkts->npkt == 0);
2786 ASSERT(txinfo->len < 65536);
2787
2788 flits = TXPKTS_WR_HDR + txinfo->nflits;
2789 can_coalesce = (txinfo->flags & HW_LSO) == 0 &&
2790 flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
2791
2792 if (can_coalesce == 0)
2793 return (EINVAL);
2794
2795 /*
2796 * Start a fresh coalesced tx WR with m as the first frame in it.
2797 */
2798 txpkts->tail = m;
2799 txpkts->npkt = 1;
2800 txpkts->nflits = flits;
2801 txpkts->flitp = &eq->desc[eq->pidx].flit[2];
2802 txpkts->plen = txinfo->len;
2803
2804 txsd = &txq->sdesc[eq->pidx];
2805 txsd->m = m;
2806 txsd->txb_used = txinfo->txb_used;
2807 txsd->hdls_used = txinfo->hdls_used;
2808
2809 return (0);
2810 }
2811
2812 /*
2813 * Note that write_txpkts_wr can never run out of hardware descriptors (but
2814 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for
2815 * coalescing only if sufficient hardware descriptors are available.
2816 */
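/*
 * Put differently: add_to_txpkts() only admits a frame while
 * txpkts->nflits + flits <= eq->avail * 8, so by the time this runs,
 * howmany(nflits, 8) descriptors are guaranteed to be available (see the
 * ASSERT on eq->avail below).
 */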
2817 static void
2818 write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
2819 {
2820 struct sge_eq *eq = &txq->eq;
2821 struct fw_eth_tx_pkts_wr *wr;
2822 struct tx_sdesc *txsd;
2823 uint32_t ctrl;
2824 uint16_t ndesc;
2825
2826 TXQ_LOCK_ASSERT_OWNED(txq); /* pidx, avail */
2827
2828 ndesc = howmany(txpkts->nflits, 8);
2829
2830 wr = (void *)&eq->desc[eq->pidx];
2831 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
2832 V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
2833 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
2834 if (eq->avail == ndesc)
2835 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
2836 wr->equiq_to_len16 = cpu_to_be32(ctrl);
2837 wr->plen = cpu_to_be16(txpkts->plen);
2838 wr->npkt = txpkts->npkt;
2839 wr->r3 = wr->type = 0;
2840
2841 /* Everything else already written */
2842
2843 txsd = &txq->sdesc[eq->pidx];
2844 txsd->desc_used = ndesc;
2845
2846 txq->txb_used += txsd->txb_used / TXB_CHUNK;
2847 txq->hdl_used += txsd->hdls_used;
2848
2849 ASSERT(eq->avail >= ndesc);
2850
2851 eq->pending += ndesc;
2852 eq->avail -= ndesc;
2853 eq->pidx += ndesc;
2854 if (eq->pidx >= eq->cap)
2855 eq->pidx -= eq->cap;
2856
2857 txq->txpkts_pkts += txpkts->npkt;
2858 txq->txpkts_wrs++;
2859 txpkts->npkt = 0; /* emptied */
2860 }
2861
2862 static int
2863 write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, mblk_t *m,
2864 struct txinfo *txinfo)
2865 {
2866 struct sge_eq *eq = &txq->eq;
2867 struct fw_eth_tx_pkt_wr *wr;
2868 struct cpl_tx_pkt_core *cpl;
2869 uint32_t ctrl; /* used in many unrelated places */
2870 uint64_t ctrl1;
2871 int nflits, ndesc;
2872 struct tx_sdesc *txsd;
2873 caddr_t dst;
2874
2875 TXQ_LOCK_ASSERT_OWNED(txq); /* pidx, avail */
2876
2877