/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

/*
 * Copyright 2021 Oxide Computer Company
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/atomic.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/strsubr.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <inet/ip.h>
#include <inet/tcp.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

/* TODO: Tune. */
int rx_buf_size = 8192;
int tx_copy_threshold = 256;
uint16_t rx_copy_threshold = 256;
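
/*
 * rx_buf_size is programmed into A_SGE_FL_BUFFER_SIZE0 in t4_sge_init()
 * below and is the size of every buffer posted to the free lists.  By
 * their names, the two copy thresholds are presumably the packet sizes at
 * or below which tx and rx data is copied into a pre-mapped buffer rather
 * than being DMA-bound directly (see copy_into_txb() and the free-list
 * routines declared below); the exact cutover logic lives in the tx/rx
 * paths, not here.
 */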

/* Used to track coalesced tx work request */
struct txpkts {
	mblk_t *tail;		/* head is in the software descriptor */
	uint64_t *flitp;	/* ptr to flit where next pkt should start */
	uint8_t npkt;		/* # of packets in this work request */
	uint8_t nflits;		/* # of flits used by this work request */
	uint16_t plen;		/* total payload (sum of all packets) */
};

/* All information needed to tx a frame */
struct txinfo {
	uint32_t len;		/* Total length of frame */
	uint32_t flags;		/* Checksum and LSO flags */
	uint32_t mss;		/* MSS for LSO */
	uint8_t nsegs;		/* # of segments in the SGL, 0 means imm. tx */
	uint8_t nflits;		/* # of flits needed for the SGL */
	uint8_t hdls_used;	/* # of DMA handles used */
	uint32_t txb_used;	/* txb_space used */
	struct ulptx_sgl sgl __attribute__((aligned(8)));
	struct ulptx_sge_pair reserved[TX_SGL_SEGS / 2];
};
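
/*
 * Layout note (a sketch, not normative): the ULP_TX SGL format carries the
 * first address/length pair inside struct ulptx_sgl itself, and every
 * subsequent pair of segments packs into one struct ulptx_sge_pair.  The
 * reserved[TX_SGL_SEGS / 2] array above is what lets a txinfo hold a full
 * TX_SGL_SEGS-entry scatter/gather list in place.
 */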

static int service_iq(struct sge_iq *iq, int budget);
static inline void init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx,
    int8_t pktc_idx, int qsize, uint8_t esize);
static inline void init_fl(struct sge_fl *fl, uint16_t qsize);
static inline void init_eq(struct adapter *sc, struct sge_eq *eq,
    uint16_t eqtype, uint16_t qsize, uint8_t tx_chan, uint16_t iqid);
static int alloc_iq_fl(struct port_info *pi, struct sge_iq *iq,
    struct sge_fl *fl, int intr_idx, int cong);
static int free_iq_fl(struct port_info *pi, struct sge_iq *iq,
    struct sge_fl *fl);
static int alloc_fwq(struct adapter *sc);
static int free_fwq(struct adapter *sc);
#ifdef TCP_OFFLOAD_ENABLE
static int alloc_mgmtq(struct adapter *sc);
#endif
static int alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx,
    int i);
static int free_rxq(struct port_info *pi, struct sge_rxq *rxq);
#ifdef TCP_OFFLOAD_ENABLE
static int alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
    int intr_idx);
static int free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq);
#endif
static int ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq);
static int eth_eq_alloc(struct adapter *sc, struct port_info *pi,
    struct sge_eq *eq);
#ifdef TCP_OFFLOAD_ENABLE
static int ofld_eq_alloc(struct adapter *sc, struct port_info *pi,
    struct sge_eq *eq);
#endif
static int alloc_eq(struct adapter *sc, struct port_info *pi,
    struct sge_eq *eq);
static int free_eq(struct adapter *sc, struct sge_eq *eq);
#ifdef TCP_OFFLOAD_ENABLE
static int alloc_wrq(struct adapter *sc, struct port_info *pi,
    struct sge_wrq *wrq, int idx);
static int free_wrq(struct adapter *sc, struct sge_wrq *wrq);
#endif
static int alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx);
static int free_txq(struct port_info *pi, struct sge_txq *txq);
static int alloc_dma_memory(struct adapter *sc, size_t len, int flags,
    ddi_device_acc_attr_t *acc_attr, ddi_dma_attr_t *dma_attr,
    ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
    caddr_t *pva);
static int free_dma_memory(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl);
static int alloc_desc_ring(struct adapter *sc, size_t len, int rw,
    ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
    caddr_t *pva);
static int free_desc_ring(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl);
static int alloc_tx_copybuffer(struct adapter *sc, size_t len,
    ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl, uint64_t *pba,
    caddr_t *pva);
static inline bool is_new_response(const struct sge_iq *iq,
    struct rsp_ctrl **ctrl);
static inline void iq_next(struct sge_iq *iq);
static int refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs);
static void refill_sfl(void *arg);
static void add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl);
static void free_fl_bufs(struct sge_fl *fl);
static mblk_t *get_fl_payload(struct adapter *sc, struct sge_fl *fl,
    uint32_t len_newbuf, int *fl_bufs_used);
static int get_frame_txinfo(struct sge_txq *txq, mblk_t **fp,
    struct txinfo *txinfo, int sgl_only);
static inline int fits_in_txb(struct sge_txq *txq, int len, int *waste);
static inline int copy_into_txb(struct sge_txq *txq, mblk_t *m, int len,
    struct txinfo *txinfo);
static inline void add_seg(struct txinfo *txinfo, uint64_t ba, uint32_t len);
static inline int add_mblk(struct sge_txq *txq, struct txinfo *txinfo,
    mblk_t *m, int len);
static void free_txinfo_resources(struct sge_txq *txq, struct txinfo *txinfo);
static int add_to_txpkts(struct sge_txq *txq, struct txpkts *txpkts, mblk_t *m,
    struct txinfo *txinfo);
static void write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts);
static int write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, mblk_t *m,
    struct txinfo *txinfo);
static inline void write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
    struct txpkts *txpkts, struct txinfo *txinfo);
static inline void copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to,
    int len);
static inline void ring_tx_db(struct adapter *sc, struct sge_eq *eq);
static int reclaim_tx_descs(struct sge_txq *txq, int howmany);
static void write_txqflush_wr(struct sge_txq *txq);
static int t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
static inline void ring_fl_db(struct adapter *sc, struct sge_fl *fl);
static kstat_t *setup_port_config_kstats(struct port_info *pi);
static kstat_t *setup_port_info_kstats(struct port_info *pi);
static kstat_t *setup_rxq_kstats(struct port_info *pi, struct sge_rxq *rxq,
    int idx);
static int update_rxq_kstats(kstat_t *ksp, int rw);
static int update_port_info_kstats(kstat_t *ksp, int rw);
static kstat_t *setup_txq_kstats(struct port_info *pi, struct sge_txq *txq,
    int idx);
static int update_txq_kstats(kstat_t *ksp, int rw);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    mblk_t *);
static int handle_fw_rpl(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);

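/*
 * reclaimable() computes how many tx descriptors the hardware is done with,
 * comparing the cidx the chip last wrote to the egress status page against
 * the cidx the driver has reclaimed up to, with wraparound over the ring of
 * eq->cap entries.  A quick example of the wraparound math: with
 * cap = 1024, eq->cidx = 1000 and a hardware cidx of 8, the second branch
 * yields 8 + 1024 - 1000 = 32 reclaimable descriptors.
 */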
static inline int
reclaimable(struct sge_eq *eq)
{
	unsigned int cidx;

	cidx = eq->spg->cidx;   /* stable snapshot */
	cidx = be16_to_cpu(cidx);

	if (cidx >= eq->cidx)
		return (cidx - eq->cidx);
	else
		return (cidx + eq->cap - eq->cidx);
}

void
t4_sge_init(struct adapter *sc)
{
	struct driver_properties *p = &sc->props;
	ddi_dma_attr_t *dma_attr;
	ddi_device_acc_attr_t *acc_attr;
	uint32_t sge_control, sge_conm_ctrl;
	int egress_threshold;

	/*
	 * Device access and DMA attributes for descriptor rings
	 */
	acc_attr = &sc->sge.acc_attr_desc;
	acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	acc_attr->devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	dma_attr = &sc->sge.dma_attr_desc;
	dma_attr->dma_attr_version = DMA_ATTR_V0;
	dma_attr->dma_attr_addr_lo = 0;
	dma_attr->dma_attr_addr_hi = UINT64_MAX;
	dma_attr->dma_attr_count_max = UINT64_MAX;
	dma_attr->dma_attr_align = 512;
	dma_attr->dma_attr_burstsizes = 0xfff;
	dma_attr->dma_attr_minxfer = 1;
	dma_attr->dma_attr_maxxfer = UINT64_MAX;
	dma_attr->dma_attr_seg = UINT64_MAX;
	dma_attr->dma_attr_sgllen = 1;
	dma_attr->dma_attr_granular = 1;
	dma_attr->dma_attr_flags = 0;

	/*
	 * Device access and DMA attributes for tx buffers
	 */
	acc_attr = &sc->sge.acc_attr_tx;
	acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	dma_attr = &sc->sge.dma_attr_tx;
	dma_attr->dma_attr_version = DMA_ATTR_V0;
	dma_attr->dma_attr_addr_lo = 0;
	dma_attr->dma_attr_addr_hi = UINT64_MAX;
	dma_attr->dma_attr_count_max = UINT64_MAX;
	dma_attr->dma_attr_align = 1;
	dma_attr->dma_attr_burstsizes = 0xfff;
	dma_attr->dma_attr_minxfer = 1;
	dma_attr->dma_attr_maxxfer = UINT64_MAX;
	dma_attr->dma_attr_seg = UINT64_MAX;
	dma_attr->dma_attr_sgllen = TX_SGL_SEGS;
	dma_attr->dma_attr_granular = 1;
	dma_attr->dma_attr_flags = 0;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(sc, A_SGE_CONTROL);
	sc->sge.pktshift = G_PKTSHIFT(sge_control);
	sc->sge.stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;

	/* t4_nex uses FLM packed mode */
	sc->sge.fl_align = t4_fl_pkt_align(sc, true);

	/*
	 * Device access and DMA attributes for rx buffers
	 */
	sc->sge.rxb_params.dip = sc->dip;
	sc->sge.rxb_params.buf_size = rx_buf_size;

	acc_attr = &sc->sge.rxb_params.acc_attr_rx;
	acc_attr->devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc_attr->devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	dma_attr = &sc->sge.rxb_params.dma_attr_rx;
	dma_attr->dma_attr_version = DMA_ATTR_V0;
	dma_attr->dma_attr_addr_lo = 0;
	dma_attr->dma_attr_addr_hi = UINT64_MAX;
	dma_attr->dma_attr_count_max = UINT64_MAX;
	/*
	 * The low 4 bits of an rx buffer address have a special meaning to
	 * the SGE, so an rx buffer cannot have an address with any of these
	 * bits set.  fl_align is >= 32 so we're sure things are ok.
	 */
	dma_attr->dma_attr_align = sc->sge.fl_align;
	dma_attr->dma_attr_burstsizes = 0xfff;
	dma_attr->dma_attr_minxfer = 1;
	dma_attr->dma_attr_maxxfer = UINT64_MAX;
	dma_attr->dma_attr_seg = UINT64_MAX;
	dma_attr->dma_attr_sgllen = 1;
	dma_attr->dma_attr_granular = 1;
	dma_attr->dma_attr_flags = 0;

	sc->sge.rxbuf_cache = rxbuf_cache_create(&sc->sge.rxb_params);

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */

	sge_conm_ctrl = t4_read_reg(sc, A_SGE_CONM_CTRL);
	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
	default:
		egress_threshold = G_T6_EGRTHRESHOLDPACKING(sge_conm_ctrl);
	}
	sc->sge.fl_starve_threshold = 2 * egress_threshold + 1;
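	/*
	 * Worked example of the formula above: per the note, the register
	 * field is in units of 2 free-list pointers, so a field value of 64
	 * means the SGE signals congestion at 128 outstanding pointers;
	 * setting the starve threshold to 2 * 64 + 1 = 129 makes the refill
	 * timer kick in strictly before that point.
	 */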

	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, rx_buf_size);

	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
	    V_THRESHOLD_0(p->counter_val[0]) |
	    V_THRESHOLD_1(p->counter_val[1]) |
	    V_THRESHOLD_2(p->counter_val[2]) |
	    V_THRESHOLD_3(p->counter_val[3]));

	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
	    V_TIMERVALUE0(us_to_core_ticks(sc, p->timer_val[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, p->timer_val[1])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
	    V_TIMERVALUE2(us_to_core_ticks(sc, p->timer_val[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, p->timer_val[3])));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
	    V_TIMERVALUE4(us_to_core_ticks(sc, p->timer_val[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, p->timer_val[5])));
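
	/*
	 * The four packet-count thresholds and six holdoff timers written
	 * above form the menu of interrupt coalescing settings; each ingress
	 * queue later picks one of each via the pktc_idx and tmr_idx
	 * arguments to init_iq().  (A sketch of the intent, not a hardware
	 * reference.)
	 */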

	(void) t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_rpl);
	(void) t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_rpl);
	(void) t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE,
	    handle_sge_egr_update);
	(void) t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
	(void) t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL,
	    t4_handle_fw_rpl);
}

/*
 * Allocate and initialize the firmware event queue and the forwarded
 * interrupt queues, if any.  The adapter owns all these queues as they are
 * not associated with any particular port.
 *
 * Returns errno on failure.  Resources allocated up to that point may still
 * be allocated.  Caller is responsible for cleanup in case this function
 * fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	int rc;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

#ifdef TCP_OFFLOAD_ENABLE
	/*
	 * Management queue.  This is just a control queue that uses the fwq as
	 * its associated iq.
	 */
	rc = alloc_mgmtq(sc);
#endif

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	(void) free_fwq(sc);

	return (0);
}

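/*
 * Vector layout sketch (inferred from the code below): the vectors counted
 * by T4_EXTRA_INTR belong to the adapter itself (firmware event queue and
 * the like), and each port's direct-interrupt rx queues are numbered
 * consecutively after that, port by port.  For example, with two ports of
 * four NIC rx queues each and no interrupt forwarding, port 1's first
 * vector is T4_EXTRA_INTR + 4.
 */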
static inline int
first_vector(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int rc = T4_EXTRA_INTR, i;

	if (sc->intr_count == 1)
		return (0);

	for_each_port(sc, i) {
		struct port_info *p = sc->port[i];

		if (i == pi->port_id)
			break;

#ifdef TCP_OFFLOAD_ENABLE
		if (!(sc->flags & INTR_FWD))
			rc += p->nrxq + p->nofldrxq;
		else
			rc += max(p->nrxq, p->nofldrxq);
#else
		/*
		 * Not compiled with offload support and intr_count > 1.  Only
		 * NIC queues exist and they'd better be taking direct
		 * interrupts.
		 */
		ASSERT(!(sc->flags & INTR_FWD));
		rc += p->nrxq;
#endif
	}
	return (rc);
}

/*
 * Given an arbitrary "index," come up with an iq that can be used by other
 * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
 * The iq returned is guaranteed to be something that takes direct interrupts.
 */
static struct sge_iq *
port_intr_iq(struct port_info *pi, int idx)
{
	struct adapter *sc = pi->adapter;
	struct sge *s = &sc->sge;
	struct sge_iq *iq = NULL;

	if (sc->intr_count == 1)
		return (&sc->sge.fwq);

#ifdef TCP_OFFLOAD_ENABLE
	if (!(sc->flags & INTR_FWD)) {
		idx %= pi->nrxq + pi->nofldrxq;

		if (idx >= pi->nrxq) {
			idx -= pi->nrxq;
			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
		} else
			iq = &s->rxq[pi->first_rxq + idx].iq;

	} else {
		idx %= max(pi->nrxq, pi->nofldrxq);

		if (pi->nrxq >= pi->nofldrxq)
			iq = &s->rxq[pi->first_rxq + idx].iq;
		else
			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
	}
#else
	/*
	 * Not compiled with offload support and intr_count > 1.  Only NIC
	 * queues exist and they'd better be taking direct interrupts.
	 */
	ASSERT(!(sc->flags & INTR_FWD));

	idx %= pi->nrxq;
	iq = &s->rxq[pi->first_rxq + idx].iq;
#endif

	return (iq);
}

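/*
 * Queue setup proceeds in passes, summarized here for orientation: first
 * the rx queues that will take direct interrupts are allocated, then the
 * rx queues that forward their interrupts (they need the abs_id of an
 * already-allocated direct-interrupt iq), and finally the tx queues, each
 * bound to a direct-interrupt iq for egress updates.  With
 * TCP_OFFLOAD_ENABLE the same pattern repeats for the offload queues, plus
 * a per-port control queue.
 */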
int
t4_setup_port_queues(struct port_info *pi)
{
	int rc = 0, i, intr_idx, j;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD_ENABLE
	int iqid;
	struct sge_wrq *ctrlq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif
	struct adapter *sc = pi->adapter;
	struct driver_properties *p = &sc->props;

	pi->ksp_config = setup_port_config_kstats(pi);
	pi->ksp_info   = setup_port_info_kstats(pi);

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = first_vector(pi);

	/*
	 * First pass over all rx queues (NIC and TOE):
	 * a) initialize iq and fl
	 * b) allocate queue iff it will take direct interrupts.
	 */

	for_each_rxq(pi, i, rxq) {

		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, p->qsize_rxq,
		    RX_IQ_ESIZE);

		init_fl(&rxq->fl, p->qsize_rxq / 8); /* 8 bufs in each entry */

		if ((!(sc->flags & INTR_FWD))
#ifdef TCP_OFFLOAD_ENABLE
		    || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
#else
		    || (sc->intr_count > 1 && pi->nrxq)
#endif
		    ) {
			rxq->iq.flags |= IQ_INTR;
			rc = alloc_rxq(pi, rxq, intr_idx, i);
			if (rc != 0)
				goto done;
			intr_idx++;
		}

	}

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
		    p->qsize_rxq, RX_IQ_ESIZE);

		init_fl(&ofld_rxq->fl, p->qsize_rxq / 8);

		if (!(sc->flags & INTR_FWD) ||
		    (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
			ofld_rxq->iq.flags = IQ_INTR;
			rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx);
			if (rc != 0)
				goto done;

			intr_idx++;
		}
	}
#endif

	/*
	 * Second pass over all rx queues (NIC and TOE).  The queues forwarding
	 * their interrupts are allocated now.
	 */
	j = 0;
	for_each_rxq(pi, i, rxq) {
		if (rxq->iq.flags & IQ_INTR)
			continue;

		intr_idx = port_intr_iq(pi, j)->abs_id;

		rc = alloc_rxq(pi, rxq, intr_idx, i);
		if (rc != 0)
			goto done;
		j++;
	}

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if (ofld_rxq->iq.flags & IQ_INTR)
			continue;

		intr_idx = port_intr_iq(pi, j)->abs_id;
		rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx);
		if (rc != 0)
			goto done;
		j++;
	}
#endif
	/*
	 * Now the tx queues.  Only one pass needed.
	 */
	j = 0;
	for_each_txq(pi, i, txq) {
		uint16_t iqid;

		iqid = port_intr_iq(pi, j)->cntxt_id;
		init_eq(sc, &txq->eq, EQ_ETH, p->qsize_txq, pi->tx_chan, iqid);
		rc = alloc_txq(pi, txq, i);
		if (rc != 0)
			goto done;
	}

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_txq(pi, i, ofld_txq) {
		uint16_t iqid;

		iqid = port_intr_iq(pi, j)->cntxt_id;
		init_eq(sc, &ofld_txq->eq, EQ_OFLD, p->qsize_txq, pi->tx_chan,
		    iqid);
		rc = alloc_wrq(sc, pi, ofld_txq, i);
		if (rc != 0)
			goto done;
	}

	/*
	 * Finally, the control queue.
	 */
	ctrlq = &sc->sge.ctrlq[pi->port_id];
	iqid = port_intr_iq(pi, 0)->cntxt_id;
	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid);
	rc = alloc_wrq(sc, pi, ctrlq, 0);
#endif

done:
	if (rc != 0)
		(void) t4_teardown_port_queues(pi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_port_queues(struct port_info *pi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD_ENABLE
	struct adapter *sc = pi->adapter;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ofld_txq;
#endif

	if (pi->ksp_config != NULL) {
		kstat_delete(pi->ksp_config);
		pi->ksp_config = NULL;
	}
	if (pi->ksp_info != NULL) {
		kstat_delete(pi->ksp_info);
		pi->ksp_info = NULL;
	}

#ifdef TCP_OFFLOAD_ENABLE
	(void) free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
#endif

	for_each_txq(pi, i, txq) {
		(void) free_txq(pi, txq);
	}

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_txq(pi, i, ofld_txq) {
		(void) free_wrq(sc, ofld_txq);
	}

	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
			(void) free_ofld_rxq(pi, ofld_rxq);
	}
#endif

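	/*
	 * Free the NIC rx queues that forward their interrupts first; the
	 * direct-interrupt queues they point at are taken down in the
	 * second loop below.
	 */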
	for_each_rxq(pi, i, rxq) {
		if ((rxq->iq.flags & IQ_INTR) == 0)
			(void) free_rxq(pi, rxq);
	}

	/*
	 * Then take down the rx queues that take direct interrupts.
	 */

	for_each_rxq(pi, i, rxq) {
		if (rxq->iq.flags & IQ_INTR)
			(void) free_rxq(pi, rxq);
	}

#ifdef TCP_OFFLOAD_ENABLE
	for_each_ofld_rxq(pi, i, ofld_rxq) {
		if (ofld_rxq->iq.flags & IQ_INTR)
			(void) free_ofld_rxq(pi, ofld_rxq);
	}
#endif

	return (0);
}

/* Deals with errors and forwarded interrupts */
uint_t
t4_intr_all(caddr_t arg1, caddr_t arg2)
{

	(void) t4_intr_err(arg1, arg2);
	(void) t4_intr(arg1, arg2);

	return (DDI_INTR_CLAIMED);
}

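/*
 * Note on budgets (an observation from the code below, not a spec): the
 * second argument to t4_ring_rx() is interpreted as a byte budget when the
 * ring is in polling mode (mac is pulling data) and as a descriptor-count
 * budget when called from the interrupt path, where t4_intr_rx_work()
 * passes qsize/8.
 */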
static void
t4_intr_rx_work(struct sge_iq *iq)
{
	mblk_t *mp = NULL;
	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */

	RXQ_LOCK(rxq);
	if (!iq->polling) {
		mp = t4_ring_rx(rxq, iq->qsize / 8);
		t4_write_reg(iq->adapter, MYPF_REG(A_SGE_PF_GTS),
		    V_INGRESSQID((u32)iq->cntxt_id) |
		    V_SEINTARM(iq->intr_next));
	}
	RXQ_UNLOCK(rxq);
	if (mp != NULL)
		mac_rx_ring(rxq->port->mh, rxq->ring_handle, mp,
		    rxq->ring_gen_num);
}

/* Deals with interrupts on the given ingress queue */
/* ARGSUSED */
uint_t
t4_intr(caddr_t arg1, caddr_t arg2)
{
	struct sge_iq *iq = (struct sge_iq *)arg2;
	int state;

	/*
	 * Right now receive polling is only enabled for MSI-X and when we
	 * have enough MSI-X vectors, i.e., no interrupt forwarding.
	 */
	if (iq->adapter->props.multi_rings) {
		t4_intr_rx_work(iq);
	} else {
		state = atomic_cas_uint(&iq->state, IQS_IDLE, IQS_BUSY);
		if (state == IQS_IDLE) {
			(void) service_iq(iq, 0);
			(void) atomic_cas_uint(&iq->state, IQS_BUSY, IQS_IDLE);
		}
	}
	return (DDI_INTR_CLAIMED);
}

/* Deals with error interrupts */
/* ARGSUSED */
uint_t
t4_intr_err(caddr_t arg1, caddr_t arg2)
{
	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	struct adapter *sc = (struct adapter *)arg1;

	t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);
	(void) t4_slow_intr_handler(sc);

	return (DDI_INTR_CLAIMED);
}

/*
 * t4_ring_rx - Process responses from an SGE response queue.
 *
 * This function processes responses from an SGE response queue up to the
 * supplied budget.  Responses include received packets as well as control
 * messages from FW or HW.  It returns a chain of mblks containing the
 * received data, to be passed up to mac_ring_rx().
 */
mblk_t *
t4_ring_rx(struct sge_rxq *rxq, int budget)
{
	struct sge_iq *iq = &rxq->iq;
	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
	struct adapter *sc = iq->adapter;
	struct rsp_ctrl *ctrl;
	const struct rss_header *rss;
	int ndescs = 0, fl_bufs_used = 0;
	int rsp_type;
	uint32_t lq;
	mblk_t *mblk_head = NULL, **mblk_tail, *m;
	struct cpl_rx_pkt *cpl;
	uint32_t received_bytes = 0, pkt_len = 0;
	bool csum_ok;
	uint16_t err_vec;

	mblk_tail = &mblk_head;

	while (is_new_response(iq, &ctrl)) {

		membar_consumer();

		m = NULL;
		rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
		lq = be32_to_cpu(ctrl->pldbuflen_qid);
		rss = (const void *)iq->cdesc;

		switch (rsp_type) {
		case X_RSPD_TYPE_FLBUF:

			ASSERT(iq->flags & IQ_HAS_FL);

			if (rss->opcode == CPL_RX_PKT) {
				cpl = (void *)(rss + 1);
				pkt_len = be16_to_cpu(cpl->len);

				if (iq->polling &&
				    ((received_bytes + pkt_len) > budget))
					goto done;

				m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
				if (m == NULL)
					goto done;

				iq->intr_next = iq->intr_params;
				m->b_rptr += sc->sge.pktshift;

				/*
				 * The compressed error vector is enabled only
				 * in the T6 config file.
				 */
				if (sc->params.tp.rx_pkt_encap)
					err_vec = G_T6_COMPR_RXERR_VEC(
					    ntohs(cpl->err_vec));
				else
					err_vec = ntohs(cpl->err_vec);

				csum_ok = cpl->csum_calc && !err_vec;

				/* TODO: what about cpl->ip_frag? */
				if (csum_ok && !cpl->ip_frag) {
					mac_hcksum_set(m, 0, 0, 0, 0xffff,
					    HCK_FULLCKSUM_OK | HCK_FULLCKSUM |
					    HCK_IPV4_HDRCKSUM_OK);
					rxq->rxcsum++;
				}
				rxq->rxpkts++;
				rxq->rxbytes += pkt_len;
				received_bytes += pkt_len;

				*mblk_tail = m;
				mblk_tail = &m->b_next;

				break;
			}

			m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
			if (m == NULL)
				goto done;
			/* FALLTHROUGH */

		case X_RSPD_TYPE_CPL:
			ASSERT(rss->opcode < NUM_CPL_CMDS);
			sc->cpl_handler[rss->opcode](iq, rss, m);
			break;

		default:
			break;
		}
		iq_next(iq);
		++ndescs;
		if (!iq->polling && (ndescs == budget))
			break;
	}

done:

	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
	    V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));

	if ((fl_bufs_used > 0) || (iq->flags & IQ_HAS_FL)) {
		int starved;
		FL_LOCK(fl);
		fl->needed += fl_bufs_used;
		starved = refill_fl(sc, fl, fl->cap / 8);
		FL_UNLOCK(fl);
		if (starved)
			add_fl_to_sfl(sc, fl);
	}
	return (mblk_head);
}

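/*
 * A rough sketch of the control flow in service_iq() below: a budget of
 * zero means "run until the queue is quiet", updating the doorbell every
 * qsize/8 descriptors; a nonzero budget makes it return EINPROGRESS once
 * that many descriptors have been consumed so the caller can reschedule.
 * The IQS_IDLE -> IQS_BUSY compare-and-swap in t4_intr() ensures only one
 * thread services a given iq, which is why forwarded interrupts
 * (X_RSPD_TYPE_INTR) can recurse at most one level deep.
 */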
84956b2bdd1SGireesh Nagabhushana /*
85056b2bdd1SGireesh Nagabhushana  * Deals with anything and everything on the given ingress queue.
85156b2bdd1SGireesh Nagabhushana  */
85256b2bdd1SGireesh Nagabhushana static int
service_iq(struct sge_iq * iq,int budget)85356b2bdd1SGireesh Nagabhushana service_iq(struct sge_iq *iq, int budget)
85456b2bdd1SGireesh Nagabhushana {
85556b2bdd1SGireesh Nagabhushana 	struct sge_iq *q;
85656b2bdd1SGireesh Nagabhushana 	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
85756b2bdd1SGireesh Nagabhushana 	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
85856b2bdd1SGireesh Nagabhushana 	struct adapter *sc = iq->adapter;
85956b2bdd1SGireesh Nagabhushana 	struct rsp_ctrl *ctrl;
86056b2bdd1SGireesh Nagabhushana 	const struct rss_header *rss;
86156b2bdd1SGireesh Nagabhushana 	int ndescs = 0, limit, fl_bufs_used = 0;
86256b2bdd1SGireesh Nagabhushana 	int rsp_type;
86356b2bdd1SGireesh Nagabhushana 	uint32_t lq;
864bbb9d5d6SJohn Levon 	int starved;
86556b2bdd1SGireesh Nagabhushana 	mblk_t *m;
86656b2bdd1SGireesh Nagabhushana 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
86756b2bdd1SGireesh Nagabhushana 
86856b2bdd1SGireesh Nagabhushana 	limit = budget ? budget : iq->qsize / 8;
86956b2bdd1SGireesh Nagabhushana 
87056b2bdd1SGireesh Nagabhushana 	/*
87156b2bdd1SGireesh Nagabhushana 	 * We always come back and check the descriptor ring for new indirect
87256b2bdd1SGireesh Nagabhushana 	 * interrupts and other responses after running a single handler.
87356b2bdd1SGireesh Nagabhushana 	 */
87456b2bdd1SGireesh Nagabhushana 	for (;;) {
87556b2bdd1SGireesh Nagabhushana 		while (is_new_response(iq, &ctrl)) {
87656b2bdd1SGireesh Nagabhushana 
87756b2bdd1SGireesh Nagabhushana 			membar_consumer();
87856b2bdd1SGireesh Nagabhushana 
87956b2bdd1SGireesh Nagabhushana 			m = NULL;
88056b2bdd1SGireesh Nagabhushana 			rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
88156b2bdd1SGireesh Nagabhushana 			lq = be32_to_cpu(ctrl->pldbuflen_qid);
88256b2bdd1SGireesh Nagabhushana 			rss = (const void *)iq->cdesc;
88356b2bdd1SGireesh Nagabhushana 
88456b2bdd1SGireesh Nagabhushana 			switch (rsp_type) {
88556b2bdd1SGireesh Nagabhushana 			case X_RSPD_TYPE_FLBUF:
88656b2bdd1SGireesh Nagabhushana 
88756b2bdd1SGireesh Nagabhushana 				ASSERT(iq->flags & IQ_HAS_FL);
88856b2bdd1SGireesh Nagabhushana 
8893dde7c95SVishal Kulkarni 				m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
89056b2bdd1SGireesh Nagabhushana 				if (m == NULL) {
891bbb9d5d6SJohn Levon 					/*
892bbb9d5d6SJohn Levon 					 * Rearm the iq with a
893bbb9d5d6SJohn Levon 					 * longer-than-default timer
894bbb9d5d6SJohn Levon 					 */
895bbb9d5d6SJohn Levon 					t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
896bbb9d5d6SJohn Levon 							V_INGRESSQID((u32)iq->cntxt_id) |
897bbb9d5d6SJohn Levon 							V_SEINTARM(V_QINTR_TIMER_IDX(SGE_NTIMERS-1)));
898bbb9d5d6SJohn Levon 					if (fl_bufs_used > 0) {
899bbb9d5d6SJohn Levon 						ASSERT(iq->flags & IQ_HAS_FL);
900bbb9d5d6SJohn Levon 						FL_LOCK(fl);
901bbb9d5d6SJohn Levon 						fl->needed += fl_bufs_used;
902bbb9d5d6SJohn Levon 						starved = refill_fl(sc, fl, fl->cap / 8);
903bbb9d5d6SJohn Levon 						FL_UNLOCK(fl);
904bbb9d5d6SJohn Levon 						if (starved)
905bbb9d5d6SJohn Levon 							add_fl_to_sfl(sc, fl);
906bbb9d5d6SJohn Levon 					}
907bbb9d5d6SJohn Levon 					return (0);
90856b2bdd1SGireesh Nagabhushana 				}
90956b2bdd1SGireesh Nagabhushana 
91056b2bdd1SGireesh Nagabhushana 			/* FALLTHRU */
91156b2bdd1SGireesh Nagabhushana 			case X_RSPD_TYPE_CPL:
91256b2bdd1SGireesh Nagabhushana 
91356b2bdd1SGireesh Nagabhushana 				ASSERT(rss->opcode < NUM_CPL_CMDS);
91456b2bdd1SGireesh Nagabhushana 				sc->cpl_handler[rss->opcode](iq, rss, m);
91556b2bdd1SGireesh Nagabhushana 				break;
91656b2bdd1SGireesh Nagabhushana 
91756b2bdd1SGireesh Nagabhushana 			case X_RSPD_TYPE_INTR:
91856b2bdd1SGireesh Nagabhushana 
91956b2bdd1SGireesh Nagabhushana 				/*
92056b2bdd1SGireesh Nagabhushana 				 * Interrupts should be forwarded only to queues
92156b2bdd1SGireesh Nagabhushana 				 * that are not forwarding their interrupts.
92256b2bdd1SGireesh Nagabhushana 				 * This means service_iq can recurse but only 1
92356b2bdd1SGireesh Nagabhushana 				 * level deep.
92456b2bdd1SGireesh Nagabhushana 				 */
92556b2bdd1SGireesh Nagabhushana 				ASSERT(budget == 0);
92656b2bdd1SGireesh Nagabhushana 
92756b2bdd1SGireesh Nagabhushana 				q = sc->sge.iqmap[lq - sc->sge.iq_start];
92856b2bdd1SGireesh Nagabhushana 				if (atomic_cas_uint(&q->state, IQS_IDLE,
92956b2bdd1SGireesh Nagabhushana 				    IQS_BUSY) == IQS_IDLE) {
93056b2bdd1SGireesh Nagabhushana 					if (service_iq(q, q->qsize / 8) == 0) {
93156b2bdd1SGireesh Nagabhushana 						(void) atomic_cas_uint(
93256b2bdd1SGireesh Nagabhushana 						    &q->state, IQS_BUSY,
93356b2bdd1SGireesh Nagabhushana 						    IQS_IDLE);
93456b2bdd1SGireesh Nagabhushana 					} else {
93556b2bdd1SGireesh Nagabhushana 						STAILQ_INSERT_TAIL(&iql, q,
93656b2bdd1SGireesh Nagabhushana 						    link);
93756b2bdd1SGireesh Nagabhushana 					}
93856b2bdd1SGireesh Nagabhushana 				}
93956b2bdd1SGireesh Nagabhushana 				break;
94056b2bdd1SGireesh Nagabhushana 
94156b2bdd1SGireesh Nagabhushana 			default:
94256b2bdd1SGireesh Nagabhushana 				break;
94356b2bdd1SGireesh Nagabhushana 			}
94456b2bdd1SGireesh Nagabhushana 
94556b2bdd1SGireesh Nagabhushana 			iq_next(iq);
94656b2bdd1SGireesh Nagabhushana 			if (++ndescs == limit) {
94756b2bdd1SGireesh Nagabhushana 				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
94856b2bdd1SGireesh Nagabhushana 				    V_CIDXINC(ndescs) |
94956b2bdd1SGireesh Nagabhushana 				    V_INGRESSQID(iq->cntxt_id) |
95056b2bdd1SGireesh Nagabhushana 				    V_SEINTARM(V_QINTR_TIMER_IDX(
95156b2bdd1SGireesh Nagabhushana 				    X_TIMERREG_UPDATE_CIDX)));
95256b2bdd1SGireesh Nagabhushana 				ndescs = 0;
95356b2bdd1SGireesh Nagabhushana 
95456b2bdd1SGireesh Nagabhushana 				if (fl_bufs_used > 0) {
95556b2bdd1SGireesh Nagabhushana 					ASSERT(iq->flags & IQ_HAS_FL);
95656b2bdd1SGireesh Nagabhushana 					FL_LOCK(fl);
95756b2bdd1SGireesh Nagabhushana 					fl->needed += fl_bufs_used;
95856b2bdd1SGireesh Nagabhushana 					(void) refill_fl(sc, fl, fl->cap / 8);
95956b2bdd1SGireesh Nagabhushana 					FL_UNLOCK(fl);
96056b2bdd1SGireesh Nagabhushana 					fl_bufs_used = 0;
96156b2bdd1SGireesh Nagabhushana 				}
96256b2bdd1SGireesh Nagabhushana 
96356b2bdd1SGireesh Nagabhushana 				if (budget != 0)
96456b2bdd1SGireesh Nagabhushana 					return (EINPROGRESS);
96556b2bdd1SGireesh Nagabhushana 			}
96656b2bdd1SGireesh Nagabhushana 		}
96756b2bdd1SGireesh Nagabhushana 
96856b2bdd1SGireesh Nagabhushana 		if (STAILQ_EMPTY(&iql) != 0)
96956b2bdd1SGireesh Nagabhushana 			break;
97056b2bdd1SGireesh Nagabhushana 
97156b2bdd1SGireesh Nagabhushana 		/*
97256b2bdd1SGireesh Nagabhushana 		 * Process the head only, and send it to the back of the list if
97356b2bdd1SGireesh Nagabhushana 		 * it's still not done.
97456b2bdd1SGireesh Nagabhushana 		 */
97556b2bdd1SGireesh Nagabhushana 		q = STAILQ_FIRST(&iql);
97656b2bdd1SGireesh Nagabhushana 		STAILQ_REMOVE_HEAD(&iql, link);
97756b2bdd1SGireesh Nagabhushana 		if (service_iq(q, q->qsize / 8) == 0)
97856b2bdd1SGireesh Nagabhushana 			(void) atomic_cas_uint(&q->state, IQS_BUSY, IQS_IDLE);
97956b2bdd1SGireesh Nagabhushana 		else
98056b2bdd1SGireesh Nagabhushana 			STAILQ_INSERT_TAIL(&iql, q, link);
98156b2bdd1SGireesh Nagabhushana 	}
98256b2bdd1SGireesh Nagabhushana 
98356b2bdd1SGireesh Nagabhushana 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
98456b2bdd1SGireesh Nagabhushana 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
98556b2bdd1SGireesh Nagabhushana 
98656b2bdd1SGireesh Nagabhushana 	if (iq->flags & IQ_HAS_FL) {
98756b2bdd1SGireesh Nagabhushana 
98856b2bdd1SGireesh Nagabhushana 		FL_LOCK(fl);
98956b2bdd1SGireesh Nagabhushana 		fl->needed += fl_bufs_used;
99056b2bdd1SGireesh Nagabhushana 		starved = refill_fl(sc, fl, fl->cap / 4);
99156b2bdd1SGireesh Nagabhushana 		FL_UNLOCK(fl);
99256b2bdd1SGireesh Nagabhushana 		if (starved != 0)
99356b2bdd1SGireesh Nagabhushana 			add_fl_to_sfl(sc, fl);
99456b2bdd1SGireesh Nagabhushana 	}
99556b2bdd1SGireesh Nagabhushana 
99656b2bdd1SGireesh Nagabhushana 	return (0);
99756b2bdd1SGireesh Nagabhushana }
99856b2bdd1SGireesh Nagabhushana 
9993dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
100056b2bdd1SGireesh Nagabhushana int
100156b2bdd1SGireesh Nagabhushana t4_mgmt_tx(struct adapter *sc, mblk_t *m)
100256b2bdd1SGireesh Nagabhushana {
100356b2bdd1SGireesh Nagabhushana 	return (t4_wrq_tx(sc, &sc->sge.mgmtq, m));
100456b2bdd1SGireesh Nagabhushana }
100556b2bdd1SGireesh Nagabhushana 
100656b2bdd1SGireesh Nagabhushana /*
100756b2bdd1SGireesh Nagabhushana  * Doesn't fail.  Holds on to work requests it can't send right away.
100856b2bdd1SGireesh Nagabhushana  */
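/*
 * Worked example (assuming the usual 64-byte hardware descriptor, i.e.
 * EQ_ESIZE == 64): a 200-byte work request needs howmany(200, 64) = 4
 * descriptors, so it is copied into the ring only when eq->avail >= 4;
 * otherwise it stays parked on wrq->wr_list until a later call reclaims
 * enough completed descriptors.
 */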
100956b2bdd1SGireesh Nagabhushana int
101056b2bdd1SGireesh Nagabhushana t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, mblk_t *m0)
101156b2bdd1SGireesh Nagabhushana {
101256b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &wrq->eq;
101356b2bdd1SGireesh Nagabhushana 	struct mblk_pair *wr_list = &wrq->wr_list;
101456b2bdd1SGireesh Nagabhushana 	int can_reclaim;
101556b2bdd1SGireesh Nagabhushana 	caddr_t dst;
101656b2bdd1SGireesh Nagabhushana 	mblk_t *wr, *next;
101756b2bdd1SGireesh Nagabhushana 
101856b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(wrq);
10193dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
102056b2bdd1SGireesh Nagabhushana 	ASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
102156b2bdd1SGireesh Nagabhushana 	    (eq->flags & EQ_TYPEMASK) == EQ_CTRL);
102256b2bdd1SGireesh Nagabhushana #else
102356b2bdd1SGireesh Nagabhushana 	ASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL);
102456b2bdd1SGireesh Nagabhushana #endif
102556b2bdd1SGireesh Nagabhushana 
102656b2bdd1SGireesh Nagabhushana 	if (m0 != NULL) {
102756b2bdd1SGireesh Nagabhushana 		if (wr_list->head != NULL)
102856b2bdd1SGireesh Nagabhushana 			wr_list->tail->b_next = m0;
102956b2bdd1SGireesh Nagabhushana 		else
103056b2bdd1SGireesh Nagabhushana 			wr_list->head = m0;
103156b2bdd1SGireesh Nagabhushana 		while (m0->b_next)
103256b2bdd1SGireesh Nagabhushana 			m0 = m0->b_next;
103356b2bdd1SGireesh Nagabhushana 		wr_list->tail = m0;
103456b2bdd1SGireesh Nagabhushana 	}
103556b2bdd1SGireesh Nagabhushana 
103656b2bdd1SGireesh Nagabhushana 	can_reclaim = reclaimable(eq);
103756b2bdd1SGireesh Nagabhushana 	eq->cidx += can_reclaim;
103856b2bdd1SGireesh Nagabhushana 	eq->avail += can_reclaim;
103956b2bdd1SGireesh Nagabhushana 	if (eq->cidx >= eq->cap)
104056b2bdd1SGireesh Nagabhushana 		eq->cidx -= eq->cap;
104156b2bdd1SGireesh Nagabhushana 
104256b2bdd1SGireesh Nagabhushana 	for (wr = wr_list->head; wr; wr = next) {
104356b2bdd1SGireesh Nagabhushana 		int ndesc, len = 0;
104456b2bdd1SGireesh Nagabhushana 		mblk_t *m;
104556b2bdd1SGireesh Nagabhushana 
104656b2bdd1SGireesh Nagabhushana 		next = wr->b_next;
104756b2bdd1SGireesh Nagabhushana 		wr->b_next = NULL;
104856b2bdd1SGireesh Nagabhushana 
104956b2bdd1SGireesh Nagabhushana 		for (m = wr; m; m = m->b_cont)
105056b2bdd1SGireesh Nagabhushana 			len += MBLKL(m);
105156b2bdd1SGireesh Nagabhushana 
105256b2bdd1SGireesh Nagabhushana 		ASSERT(len > 0 && (len & 0x7) == 0);
105356b2bdd1SGireesh Nagabhushana 		ASSERT(len <= SGE_MAX_WR_LEN);
105456b2bdd1SGireesh Nagabhushana 
105556b2bdd1SGireesh Nagabhushana 		ndesc = howmany(len, EQ_ESIZE);
105656b2bdd1SGireesh Nagabhushana 		if (eq->avail < ndesc) {
105756b2bdd1SGireesh Nagabhushana 			wr->b_next = next;
105856b2bdd1SGireesh Nagabhushana 			wrq->no_desc++;
105956b2bdd1SGireesh Nagabhushana 			break;
106056b2bdd1SGireesh Nagabhushana 		}
106156b2bdd1SGireesh Nagabhushana 
106256b2bdd1SGireesh Nagabhushana 		dst = (void *)&eq->desc[eq->pidx];
106356b2bdd1SGireesh Nagabhushana 		for (m = wr; m; m = m->b_cont)
106456b2bdd1SGireesh Nagabhushana 			copy_to_txd(eq, (void *)m->b_rptr, &dst, MBLKL(m));
106556b2bdd1SGireesh Nagabhushana 
106656b2bdd1SGireesh Nagabhushana 		eq->pidx += ndesc;
106756b2bdd1SGireesh Nagabhushana 		eq->avail -= ndesc;
106856b2bdd1SGireesh Nagabhushana 		if (eq->pidx >= eq->cap)
106956b2bdd1SGireesh Nagabhushana 			eq->pidx -= eq->cap;
107056b2bdd1SGireesh Nagabhushana 
107156b2bdd1SGireesh Nagabhushana 		eq->pending += ndesc;
107256b2bdd1SGireesh Nagabhushana 		if (eq->pending > 16)
107356b2bdd1SGireesh Nagabhushana 			ring_tx_db(sc, eq);
107456b2bdd1SGireesh Nagabhushana 
107556b2bdd1SGireesh Nagabhushana 		wrq->tx_wrs++;
107656b2bdd1SGireesh Nagabhushana 		freemsg(wr);
107756b2bdd1SGireesh Nagabhushana 
107856b2bdd1SGireesh Nagabhushana 		if (eq->avail < 8) {
107956b2bdd1SGireesh Nagabhushana 			can_reclaim = reclaimable(eq);
108056b2bdd1SGireesh Nagabhushana 			eq->cidx += can_reclaim;
108156b2bdd1SGireesh Nagabhushana 			eq->avail += can_reclaim;
108256b2bdd1SGireesh Nagabhushana 			if (eq->cidx >= eq->cap)
108356b2bdd1SGireesh Nagabhushana 				eq->cidx -= eq->cap;
108456b2bdd1SGireesh Nagabhushana 		}
108556b2bdd1SGireesh Nagabhushana 	}
108656b2bdd1SGireesh Nagabhushana 
108756b2bdd1SGireesh Nagabhushana 	if (eq->pending != 0)
108856b2bdd1SGireesh Nagabhushana 		ring_tx_db(sc, eq);
108956b2bdd1SGireesh Nagabhushana 
109056b2bdd1SGireesh Nagabhushana 	if (wr == NULL)
109156b2bdd1SGireesh Nagabhushana 		wr_list->head = wr_list->tail = NULL;
109256b2bdd1SGireesh Nagabhushana 	else {
109356b2bdd1SGireesh Nagabhushana 		wr_list->head = wr;
109456b2bdd1SGireesh Nagabhushana 
109556b2bdd1SGireesh Nagabhushana 		ASSERT(wr_list->tail->b_next == NULL);
109656b2bdd1SGireesh Nagabhushana 	}
109756b2bdd1SGireesh Nagabhushana 
109856b2bdd1SGireesh Nagabhushana 	return (0);
109956b2bdd1SGireesh Nagabhushana }
11003dde7c95SVishal Kulkarni #endif
110156b2bdd1SGireesh Nagabhushana 
110256b2bdd1SGireesh Nagabhushana /* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
110356b2bdd1SGireesh Nagabhushana #define	TXPKTS_PKT_HDR ((\
110456b2bdd1SGireesh Nagabhushana 	sizeof (struct ulp_txpkt) + \
110556b2bdd1SGireesh Nagabhushana 	sizeof (struct ulptx_idata) + \
110656b2bdd1SGireesh Nagabhushana 	sizeof (struct cpl_tx_pkt_core)) / 8)
110756b2bdd1SGireesh Nagabhushana 
110856b2bdd1SGireesh Nagabhushana /* Header of a coalesced tx WR, before SGL of first packet (in flits) */
110956b2bdd1SGireesh Nagabhushana #define	TXPKTS_WR_HDR (\
111056b2bdd1SGireesh Nagabhushana 	sizeof (struct fw_eth_tx_pkts_wr) / 8 + \
111156b2bdd1SGireesh Nagabhushana 	TXPKTS_PKT_HDR)
111256b2bdd1SGireesh Nagabhushana 
111356b2bdd1SGireesh Nagabhushana /* Header of a tx WR, before SGL of first packet (in flits) */
111456b2bdd1SGireesh Nagabhushana #define	TXPKT_WR_HDR ((\
111556b2bdd1SGireesh Nagabhushana 	sizeof (struct fw_eth_tx_pkt_wr) + \
111656b2bdd1SGireesh Nagabhushana 	sizeof (struct cpl_tx_pkt_core)) / 8)
111756b2bdd1SGireesh Nagabhushana 
111856b2bdd1SGireesh Nagabhushana /* Header of a tx LSO WR, before SGL of first packet (in flits) */
111956b2bdd1SGireesh Nagabhushana #define	TXPKT_LSO_WR_HDR ((\
112056b2bdd1SGireesh Nagabhushana 	sizeof (struct fw_eth_tx_pkt_wr) + \
1121de483253SVishal Kulkarni 	sizeof (struct cpl_tx_pkt_lso_core) + \
112256b2bdd1SGireesh Nagabhushana 	sizeof (struct cpl_tx_pkt_core)) / 8)
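/*
 * Illustrative arithmetic, assuming the customary sizes from the shared
 * Chelsio headers (ulp_txpkt and ulptx_idata are 8 bytes each;
 * fw_eth_tx_pkt_wr, fw_eth_tx_pkts_wr, cpl_tx_pkt_core and
 * cpl_tx_pkt_lso_core are 16 bytes each): TXPKTS_PKT_HDR is
 * (8 + 8 + 16) / 8 = 4 flits, TXPKTS_WR_HDR is 16 / 8 + 4 = 6 flits,
 * TXPKT_WR_HDR is (16 + 16) / 8 = 4 flits and TXPKT_LSO_WR_HDR is
 * (16 + 16 + 16) / 8 = 6 flits (a flit is 8 bytes).
 */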
112356b2bdd1SGireesh Nagabhushana 
112456b2bdd1SGireesh Nagabhushana mblk_t *
11253dde7c95SVishal Kulkarni t4_eth_tx(void *arg, mblk_t *frame)
112656b2bdd1SGireesh Nagabhushana {
11273dde7c95SVishal Kulkarni 	struct sge_txq *txq = (struct sge_txq *)arg;
11283dde7c95SVishal Kulkarni 	struct port_info *pi = txq->port;
112956b2bdd1SGireesh Nagabhushana 	struct adapter *sc = pi->adapter;
113056b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
113156b2bdd1SGireesh Nagabhushana 	mblk_t *next_frame;
113256b2bdd1SGireesh Nagabhushana 	int rc, coalescing;
113356b2bdd1SGireesh Nagabhushana 	struct txpkts txpkts;
113456b2bdd1SGireesh Nagabhushana 	struct txinfo txinfo;
113556b2bdd1SGireesh Nagabhushana 
113656b2bdd1SGireesh Nagabhushana 	txpkts.npkt = 0; /* indicates there's nothing in txpkts */
113756b2bdd1SGireesh Nagabhushana 	coalescing = 0;
113856b2bdd1SGireesh Nagabhushana 
113956b2bdd1SGireesh Nagabhushana 	TXQ_LOCK(txq);
114056b2bdd1SGireesh Nagabhushana 	if (eq->avail < 8)
114156b2bdd1SGireesh Nagabhushana 		(void) reclaim_tx_descs(txq, 8);
114256b2bdd1SGireesh Nagabhushana 	for (; frame; frame = next_frame) {
114356b2bdd1SGireesh Nagabhushana 
114456b2bdd1SGireesh Nagabhushana 		if (eq->avail < 8)
114556b2bdd1SGireesh Nagabhushana 			break;
114656b2bdd1SGireesh Nagabhushana 
114756b2bdd1SGireesh Nagabhushana 		next_frame = frame->b_next;
114856b2bdd1SGireesh Nagabhushana 		frame->b_next = NULL;
114956b2bdd1SGireesh Nagabhushana 
115056b2bdd1SGireesh Nagabhushana 		if (next_frame != NULL)
115156b2bdd1SGireesh Nagabhushana 			coalescing = 1;
115256b2bdd1SGireesh Nagabhushana 
115356b2bdd1SGireesh Nagabhushana 		rc = get_frame_txinfo(txq, &frame, &txinfo, coalescing);
115456b2bdd1SGireesh Nagabhushana 		if (rc != 0) {
115556b2bdd1SGireesh Nagabhushana 			if (rc == ENOMEM) {
115656b2bdd1SGireesh Nagabhushana 
115756b2bdd1SGireesh Nagabhushana 				/* Short of resources, suspend tx */
115856b2bdd1SGireesh Nagabhushana 
115956b2bdd1SGireesh Nagabhushana 				frame->b_next = next_frame;
116056b2bdd1SGireesh Nagabhushana 				break;
116156b2bdd1SGireesh Nagabhushana 			}
116256b2bdd1SGireesh Nagabhushana 
116356b2bdd1SGireesh Nagabhushana 			/*
116456b2bdd1SGireesh Nagabhushana 			 * Unrecoverable error for this frame, throw it
116556b2bdd1SGireesh Nagabhushana 			 * away and move on to the next.
116656b2bdd1SGireesh Nagabhushana 			 */
116756b2bdd1SGireesh Nagabhushana 
116856b2bdd1SGireesh Nagabhushana 			freemsg(frame);
116956b2bdd1SGireesh Nagabhushana 			continue;
117056b2bdd1SGireesh Nagabhushana 		}
117156b2bdd1SGireesh Nagabhushana 
117256b2bdd1SGireesh Nagabhushana 		if (coalescing != 0 &&
117356b2bdd1SGireesh Nagabhushana 		    add_to_txpkts(txq, &txpkts, frame, &txinfo) == 0) {
117456b2bdd1SGireesh Nagabhushana 
117556b2bdd1SGireesh Nagabhushana 			/* Successfully absorbed into txpkts */
117656b2bdd1SGireesh Nagabhushana 
117756b2bdd1SGireesh Nagabhushana 			write_ulp_cpl_sgl(pi, txq, &txpkts, &txinfo);
117856b2bdd1SGireesh Nagabhushana 			goto doorbell;
117956b2bdd1SGireesh Nagabhushana 		}
118056b2bdd1SGireesh Nagabhushana 
118156b2bdd1SGireesh Nagabhushana 		/*
118256b2bdd1SGireesh Nagabhushana 		 * We weren't coalescing to begin with, or current frame could
118356b2bdd1SGireesh Nagabhushana 		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
118456b2bdd1SGireesh Nagabhushana 		 * given to it can't be coalesced).  Either way there should be
118556b2bdd1SGireesh Nagabhushana 		 * nothing in txpkts.
118656b2bdd1SGireesh Nagabhushana 		 */
118756b2bdd1SGireesh Nagabhushana 		ASSERT(txpkts.npkt == 0);
118856b2bdd1SGireesh Nagabhushana 
118956b2bdd1SGireesh Nagabhushana 		/* We're sending out individual frames now */
119056b2bdd1SGireesh Nagabhushana 		coalescing = 0;
119156b2bdd1SGireesh Nagabhushana 
119256b2bdd1SGireesh Nagabhushana 		if (eq->avail < 8)
119356b2bdd1SGireesh Nagabhushana 			(void) reclaim_tx_descs(txq, 8);
119456b2bdd1SGireesh Nagabhushana 		rc = write_txpkt_wr(pi, txq, frame, &txinfo);
119556b2bdd1SGireesh Nagabhushana 		if (rc != 0) {
119656b2bdd1SGireesh Nagabhushana 
119756b2bdd1SGireesh Nagabhushana 			/* Short of hardware descriptors, suspend tx */
119856b2bdd1SGireesh Nagabhushana 
119956b2bdd1SGireesh Nagabhushana 			/*
120056b2bdd1SGireesh Nagabhushana 			 * This is an unlikely but expensive failure.  We've
120156b2bdd1SGireesh Nagabhushana 			 * done all the hard work (DMA bindings etc.) and now we
120256b2bdd1SGireesh Nagabhushana 			 * can't send out the frame.  What's worse, we have to
120356b2bdd1SGireesh Nagabhushana 			 * spend even more time freeing up everything in txinfo.
120456b2bdd1SGireesh Nagabhushana 			 */
120556b2bdd1SGireesh Nagabhushana 			txq->qfull++;
120656b2bdd1SGireesh Nagabhushana 			free_txinfo_resources(txq, &txinfo);
120756b2bdd1SGireesh Nagabhushana 
120856b2bdd1SGireesh Nagabhushana 			frame->b_next = next_frame;
120956b2bdd1SGireesh Nagabhushana 			break;
121056b2bdd1SGireesh Nagabhushana 		}
121156b2bdd1SGireesh Nagabhushana 
121256b2bdd1SGireesh Nagabhushana doorbell:
121356b2bdd1SGireesh Nagabhushana 		/* Fewer and fewer doorbells as the queue fills up */
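		/*
		 * Illustrative numbers: with 24 descriptors outstanding
		 * (qsize - avail == 24), fls(24) is 5, so the doorbell rings
		 * once 1 << 2 = 4 writes are pending; with 600 outstanding,
		 * fls(600) is 10 and it waits for 1 << 5 = 32.
		 */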
12143dde7c95SVishal Kulkarni 		if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2))) {
12153dde7c95SVishal Kulkarni 			txq->txbytes += txinfo.len;
12163dde7c95SVishal Kulkarni 			txq->txpkts++;
121756b2bdd1SGireesh Nagabhushana 			ring_tx_db(sc, eq);
12183dde7c95SVishal Kulkarni 		}
121956b2bdd1SGireesh Nagabhushana 		(void) reclaim_tx_descs(txq, 32);
122056b2bdd1SGireesh Nagabhushana 	}
122156b2bdd1SGireesh Nagabhushana 
122256b2bdd1SGireesh Nagabhushana 	if (txpkts.npkt > 0)
122356b2bdd1SGireesh Nagabhushana 		write_txpkts_wr(txq, &txpkts);
122456b2bdd1SGireesh Nagabhushana 
122556b2bdd1SGireesh Nagabhushana 	/*
122656b2bdd1SGireesh Nagabhushana 	 * frame not NULL means there was an error but we haven't thrown it
122756b2bdd1SGireesh Nagabhushana 	 * away.  This can happen when we're short of tx descriptors (qfull) or
122856b2bdd1SGireesh Nagabhushana 	 * maybe even DMA handles (dma_hdl_failed).  Either way, a credit flush
122956b2bdd1SGireesh Nagabhushana 	 * and reclaim will get things going again.
123056b2bdd1SGireesh Nagabhushana 	 *
123156b2bdd1SGireesh Nagabhushana 	 * If eq->avail is already 0 we know a credit flush was requested in the
123256b2bdd1SGireesh Nagabhushana 	 * WR that reduced it to 0 so we don't need another flush (we don't have
123356b2bdd1SGireesh Nagabhushana 	 * any descriptor for a flush WR anyway, duh).
123456b2bdd1SGireesh Nagabhushana 	 */
123556b2bdd1SGireesh Nagabhushana 	if (frame && eq->avail > 0)
123656b2bdd1SGireesh Nagabhushana 		write_txqflush_wr(txq);
123756b2bdd1SGireesh Nagabhushana 
123856b2bdd1SGireesh Nagabhushana 	if (eq->pending != 0)
123956b2bdd1SGireesh Nagabhushana 		ring_tx_db(sc, eq);
124056b2bdd1SGireesh Nagabhushana 
124156b2bdd1SGireesh Nagabhushana 	(void) reclaim_tx_descs(txq, eq->qsize);
124256b2bdd1SGireesh Nagabhushana 	TXQ_UNLOCK(txq);
124356b2bdd1SGireesh Nagabhushana 
124456b2bdd1SGireesh Nagabhushana 	return (frame);
124556b2bdd1SGireesh Nagabhushana }
124656b2bdd1SGireesh Nagabhushana 
124756b2bdd1SGireesh Nagabhushana static inline void
12483dde7c95SVishal Kulkarni init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int8_t pktc_idx,
12493dde7c95SVishal Kulkarni 	int qsize, uint8_t esize)
125056b2bdd1SGireesh Nagabhushana {
125156b2bdd1SGireesh Nagabhushana 	ASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS);
125256b2bdd1SGireesh Nagabhushana 	ASSERT(pktc_idx < SGE_NCOUNTERS);	/* -ve is ok, means don't use */
125356b2bdd1SGireesh Nagabhushana 
125456b2bdd1SGireesh Nagabhushana 	iq->flags = 0;
125556b2bdd1SGireesh Nagabhushana 	iq->adapter = sc;
125656b2bdd1SGireesh Nagabhushana 	iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
125756b2bdd1SGireesh Nagabhushana 	iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
125856b2bdd1SGireesh Nagabhushana 	if (pktc_idx >= 0) {
125956b2bdd1SGireesh Nagabhushana 		iq->intr_params |= F_QINTR_CNT_EN;
126056b2bdd1SGireesh Nagabhushana 		iq->intr_pktc_idx = pktc_idx;
126156b2bdd1SGireesh Nagabhushana 	}
126256b2bdd1SGireesh Nagabhushana 	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
126356b2bdd1SGireesh Nagabhushana 	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
126456b2bdd1SGireesh Nagabhushana }
126556b2bdd1SGireesh Nagabhushana 
126656b2bdd1SGireesh Nagabhushana static inline void
126756b2bdd1SGireesh Nagabhushana init_fl(struct sge_fl *fl, uint16_t qsize)
126856b2bdd1SGireesh Nagabhushana {
126956b2bdd1SGireesh Nagabhushana 
127056b2bdd1SGireesh Nagabhushana 	fl->qsize = qsize;
1271bbb9d5d6SJohn Levon 	fl->allocb_fail = 0;
127256b2bdd1SGireesh Nagabhushana }
127356b2bdd1SGireesh Nagabhushana 
127456b2bdd1SGireesh Nagabhushana static inline void
1275de483253SVishal Kulkarni init_eq(struct adapter *sc, struct sge_eq *eq, uint16_t eqtype, uint16_t qsize,
1276de483253SVishal Kulkarni     uint8_t tx_chan, uint16_t iqid)
127756b2bdd1SGireesh Nagabhushana {
1278de483253SVishal Kulkarni 	struct sge *s = &sc->sge;
1279de483253SVishal Kulkarni 	uint32_t r;
1280de483253SVishal Kulkarni 
128156b2bdd1SGireesh Nagabhushana 	ASSERT(tx_chan < NCHAN);
128256b2bdd1SGireesh Nagabhushana 	ASSERT(eqtype <= EQ_TYPEMASK);
128356b2bdd1SGireesh Nagabhushana 
1284de483253SVishal Kulkarni 	if (is_t5(sc->params.chip)) {
1285de483253SVishal Kulkarni 		r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
1286de483253SVishal Kulkarni 		r >>= S_QUEUESPERPAGEPF0 +
1287de483253SVishal Kulkarni 		    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
1288de483253SVishal Kulkarni 		s->s_qpp = r & M_QUEUESPERPAGEPF0;
1289de483253SVishal Kulkarni 	}
12904e0c5effSToomas Soome 
129156b2bdd1SGireesh Nagabhushana 	eq->flags = eqtype & EQ_TYPEMASK;
129256b2bdd1SGireesh Nagabhushana 	eq->tx_chan = tx_chan;
129356b2bdd1SGireesh Nagabhushana 	eq->iqid = iqid;
129456b2bdd1SGireesh Nagabhushana 	eq->qsize = qsize;
129556b2bdd1SGireesh Nagabhushana }
129656b2bdd1SGireesh Nagabhushana 
129756b2bdd1SGireesh Nagabhushana /*
129856b2bdd1SGireesh Nagabhushana  * Allocates the ring for an ingress queue and an optional freelist.  If the
129956b2bdd1SGireesh Nagabhushana  * freelist is specified it will be allocated and then associated with the
130056b2bdd1SGireesh Nagabhushana  * ingress queue.
130156b2bdd1SGireesh Nagabhushana  *
130256b2bdd1SGireesh Nagabhushana  * Returns errno on failure.  Resources allocated up to that point may still be
130356b2bdd1SGireesh Nagabhushana  * allocated.  Caller is responsible for cleanup in case this function fails.
130456b2bdd1SGireesh Nagabhushana  *
130556b2bdd1SGireesh Nagabhushana  * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then
130656b2bdd1SGireesh Nagabhushana  * the intr_idx specifies the vector, starting from 0.  Otherwise it specifies
130756b2bdd1SGireesh Nagabhushana  * the index of the queue to which its interrupts will be forwarded.
130856b2bdd1SGireesh Nagabhushana  */
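/*
 * For example (illustrative): with IQ_INTR set, intr_idx 2 makes this queue
 * raise interrupt vector 2 itself; without IQ_INTR, intr_idx 2 means the
 * queue's events are signalled through, and serviced from, the queue at
 * index 2 of the interrupt-taking queues.
 */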
130956b2bdd1SGireesh Nagabhushana static int
131056b2bdd1SGireesh Nagabhushana alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
131156b2bdd1SGireesh Nagabhushana     int intr_idx, int cong)
131256b2bdd1SGireesh Nagabhushana {
1313de483253SVishal Kulkarni 	int rc, i, cntxt_id;
131456b2bdd1SGireesh Nagabhushana 	size_t len;
131556b2bdd1SGireesh Nagabhushana 	struct fw_iq_cmd c;
131656b2bdd1SGireesh Nagabhushana 	struct adapter *sc = iq->adapter;
131756b2bdd1SGireesh Nagabhushana 	uint32_t v = 0;
131856b2bdd1SGireesh Nagabhushana 
131956b2bdd1SGireesh Nagabhushana 	len = iq->qsize * iq->esize;
132056b2bdd1SGireesh Nagabhushana 	rc = alloc_desc_ring(sc, len, DDI_DMA_READ, &iq->dhdl, &iq->ahdl,
132156b2bdd1SGireesh Nagabhushana 	    &iq->ba, (caddr_t *)&iq->desc);
132256b2bdd1SGireesh Nagabhushana 	if (rc != 0)
132356b2bdd1SGireesh Nagabhushana 		return (rc);
132456b2bdd1SGireesh Nagabhushana 
132556b2bdd1SGireesh Nagabhushana 	bzero(&c, sizeof (c));
132656b2bdd1SGireesh Nagabhushana 	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
132756b2bdd1SGireesh Nagabhushana 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
132856b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_VFN(0));
132956b2bdd1SGireesh Nagabhushana 
133056b2bdd1SGireesh Nagabhushana 	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
133156b2bdd1SGireesh Nagabhushana 	    FW_LEN16(c));
133256b2bdd1SGireesh Nagabhushana 
133356b2bdd1SGireesh Nagabhushana 	/* Special handling for firmware event queue */
133456b2bdd1SGireesh Nagabhushana 	if (iq == &sc->sge.fwq)
133556b2bdd1SGireesh Nagabhushana 		v |= F_FW_IQ_CMD_IQASYNCH;
133656b2bdd1SGireesh Nagabhushana 
133756b2bdd1SGireesh Nagabhushana 	if (iq->flags & IQ_INTR)
133856b2bdd1SGireesh Nagabhushana 		ASSERT(intr_idx < sc->intr_count);
133956b2bdd1SGireesh Nagabhushana 	else
134056b2bdd1SGireesh Nagabhushana 		v |= F_FW_IQ_CMD_IQANDST;
134156b2bdd1SGireesh Nagabhushana 	v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx);
134256b2bdd1SGireesh Nagabhushana 
134356b2bdd1SGireesh Nagabhushana 	c.type_to_iqandstindex = cpu_to_be32(v |
134456b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
134556b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_VIID(pi->viid) |
134656b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
134756b2bdd1SGireesh Nagabhushana 	c.iqdroprss_to_iqesize = cpu_to_be16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
134856b2bdd1SGireesh Nagabhushana 	    F_FW_IQ_CMD_IQGTSMODE |
134956b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
135056b2bdd1SGireesh Nagabhushana 	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
135156b2bdd1SGireesh Nagabhushana 	c.iqsize = cpu_to_be16(iq->qsize);
135256b2bdd1SGireesh Nagabhushana 	c.iqaddr = cpu_to_be64(iq->ba);
135356b2bdd1SGireesh Nagabhushana 	if (cong >= 0)
13547e6ad469SVishal Kulkarni 		c.iqns_to_fl0congen = BE_32(F_FW_IQ_CMD_IQFLINTCONGEN |
13557e6ad469SVishal Kulkarni 		    V_FW_IQ_CMD_IQTYPE(cong ?
13567e6ad469SVishal Kulkarni 		    FW_IQ_IQTYPE_NIC : FW_IQ_IQTYPE_OFLD));
135756b2bdd1SGireesh Nagabhushana 
135856b2bdd1SGireesh Nagabhushana 	if (fl != NULL) {
13593dde7c95SVishal Kulkarni 		unsigned int chip_ver = CHELSIO_CHIP_VERSION(sc->params.chip);
13603dde7c95SVishal Kulkarni 
136156b2bdd1SGireesh Nagabhushana 		mutex_init(&fl->lock, NULL, MUTEX_DRIVER,
136256b2bdd1SGireesh Nagabhushana 		    DDI_INTR_PRI(sc->intr_pri));
136356b2bdd1SGireesh Nagabhushana 		fl->flags |= FL_MTX;
136456b2bdd1SGireesh Nagabhushana 
136556b2bdd1SGireesh Nagabhushana 		len = fl->qsize * RX_FL_ESIZE;
136656b2bdd1SGireesh Nagabhushana 		rc = alloc_desc_ring(sc, len, DDI_DMA_WRITE, &fl->dhdl,
136756b2bdd1SGireesh Nagabhushana 		    &fl->ahdl, &fl->ba, (caddr_t *)&fl->desc);
136856b2bdd1SGireesh Nagabhushana 		if (rc != 0)
136956b2bdd1SGireesh Nagabhushana 			return (rc);
137056b2bdd1SGireesh Nagabhushana 
137156b2bdd1SGireesh Nagabhushana 		/* Allocate space for one software descriptor per buffer. */
13723dde7c95SVishal Kulkarni 		fl->cap = (fl->qsize - sc->sge.stat_len / RX_FL_ESIZE) * 8;
137356b2bdd1SGireesh Nagabhushana 		fl->sdesc = kmem_zalloc(sizeof (struct fl_sdesc) * fl->cap,
137456b2bdd1SGireesh Nagabhushana 		    KM_SLEEP);
137556b2bdd1SGireesh Nagabhushana 		fl->needed = fl->cap;
137656b2bdd1SGireesh Nagabhushana 		fl->lowat = roundup(sc->sge.fl_starve_threshold, 8);
137756b2bdd1SGireesh Nagabhushana 
137856b2bdd1SGireesh Nagabhushana 		c.iqns_to_fl0congen |=
137956b2bdd1SGireesh Nagabhushana 		    cpu_to_be32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
138056b2bdd1SGireesh Nagabhushana 		    F_FW_IQ_CMD_FL0PACKEN | F_FW_IQ_CMD_FL0PADEN);
138156b2bdd1SGireesh Nagabhushana 		if (cong >= 0) {
138256b2bdd1SGireesh Nagabhushana 			c.iqns_to_fl0congen |=
138356b2bdd1SGireesh Nagabhushana 			    BE_32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
138456b2bdd1SGireesh Nagabhushana 			    F_FW_IQ_CMD_FL0CONGCIF |
138556b2bdd1SGireesh Nagabhushana 			    F_FW_IQ_CMD_FL0CONGEN);
138656b2bdd1SGireesh Nagabhushana 		}
13873dde7c95SVishal Kulkarni 
13883dde7c95SVishal Kulkarni 		/*
13893dde7c95SVishal Kulkarni 		 * In T6, an FL egress queue has 16 bytes of internal overhead
13903dde7c95SVishal Kulkarni 		 * for the header going into the FLM module, so the maximum
13913dde7c95SVishal Kulkarni 		 * allowed burst size is 448 bytes.  T4/T5 hardware doesn't
13923dde7c95SVishal Kulkarni 		 * coalesce fetch requests if more than 64 bytes of Free List
13933dde7c95SVishal Kulkarni 		 * pointers are provided, so we use a 128-byte Fetch Burst
13943dde7c95SVishal Kulkarni 		 * Minimum there (T6 coalesces, so it can use 64 bytes).
13953dde7c95SVishal Kulkarni 		 */
13963dde7c95SVishal Kulkarni 
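		/*
		 * Net effect of the selection below: T4/T5 get a 128-byte
		 * FBMIN with a 512-byte FBMAX; T6 gets a 64-byte FBMIN with
		 * a 256-byte FBMAX.
		 */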
139756b2bdd1SGireesh Nagabhushana 		c.fl0dcaen_to_fl0cidxfthresh =
13983dde7c95SVishal Kulkarni 		    cpu_to_be16(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
13993dde7c95SVishal Kulkarni 		    X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
14003dde7c95SVishal Kulkarni 		    V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
14013dde7c95SVishal Kulkarni 		    X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
140456b2bdd1SGireesh Nagabhushana 		c.fl0size = cpu_to_be16(fl->qsize);
140556b2bdd1SGireesh Nagabhushana 		c.fl0addr = cpu_to_be64(fl->ba);
140656b2bdd1SGireesh Nagabhushana 	}
140756b2bdd1SGireesh Nagabhushana 
140856b2bdd1SGireesh Nagabhushana 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
140956b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
141056b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
141156b2bdd1SGireesh Nagabhushana 		    "failed to create ingress queue: %d", rc);
141256b2bdd1SGireesh Nagabhushana 		return (rc);
141356b2bdd1SGireesh Nagabhushana 	}
141456b2bdd1SGireesh Nagabhushana 
141556b2bdd1SGireesh Nagabhushana 	iq->cdesc = iq->desc;
141656b2bdd1SGireesh Nagabhushana 	iq->cidx = 0;
141756b2bdd1SGireesh Nagabhushana 	iq->gen = 1;
141856b2bdd1SGireesh Nagabhushana 	iq->intr_next = iq->intr_params;
141956b2bdd1SGireesh Nagabhushana 	iq->adapter = sc;
142056b2bdd1SGireesh Nagabhushana 	iq->cntxt_id = be16_to_cpu(c.iqid);
142156b2bdd1SGireesh Nagabhushana 	iq->abs_id = be16_to_cpu(c.physiqid);
142256b2bdd1SGireesh Nagabhushana 	iq->flags |= IQ_ALLOCATED;
14233dde7c95SVishal Kulkarni 	mutex_init(&iq->lock, NULL, MUTEX_DRIVER,
14243dde7c95SVishal Kulkarni 	    DDI_INTR_PRI(sc->intr_pri));
14253dde7c95SVishal Kulkarni 	iq->polling = 0;
142656b2bdd1SGireesh Nagabhushana 
142756b2bdd1SGireesh Nagabhushana 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
1428*77ac03cbSRahul Lakkireddy 	if (cntxt_id >= sc->sge.iqmap_sz) {
142956b2bdd1SGireesh Nagabhushana 		panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
1430*77ac03cbSRahul Lakkireddy 		      cntxt_id, sc->sge.iqmap_sz - 1);
143156b2bdd1SGireesh Nagabhushana 	}
143256b2bdd1SGireesh Nagabhushana 	sc->sge.iqmap[cntxt_id] = iq;
143356b2bdd1SGireesh Nagabhushana 
143456b2bdd1SGireesh Nagabhushana 	if (fl != NULL) {
143556b2bdd1SGireesh Nagabhushana 		fl->cntxt_id = be16_to_cpu(c.fl0id);
143656b2bdd1SGireesh Nagabhushana 		fl->pidx = fl->cidx = 0;
143756b2bdd1SGireesh Nagabhushana 		fl->copy_threshold = rx_copy_threshold;
143856b2bdd1SGireesh Nagabhushana 
143956b2bdd1SGireesh Nagabhushana 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
1440*77ac03cbSRahul Lakkireddy 		if (cntxt_id >= sc->sge.eqmap_sz) {
144156b2bdd1SGireesh Nagabhushana 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
1442*77ac03cbSRahul Lakkireddy 			      __func__, cntxt_id, sc->sge.eqmap_sz - 1);
144356b2bdd1SGireesh Nagabhushana 		}
144456b2bdd1SGireesh Nagabhushana 		sc->sge.eqmap[cntxt_id] = (void *)fl;
144556b2bdd1SGireesh Nagabhushana 
144656b2bdd1SGireesh Nagabhushana 		FL_LOCK(fl);
144756b2bdd1SGireesh Nagabhushana 		(void) refill_fl(sc, fl, fl->lowat);
144856b2bdd1SGireesh Nagabhushana 		FL_UNLOCK(fl);
144956b2bdd1SGireesh Nagabhushana 
145056b2bdd1SGireesh Nagabhushana 		iq->flags |= IQ_HAS_FL;
145156b2bdd1SGireesh Nagabhushana 	}
145256b2bdd1SGireesh Nagabhushana 
1453de483253SVishal Kulkarni 	if (is_t5(sc->params.chip) && cong >= 0) {
1454de483253SVishal Kulkarni 		uint32_t param, val;
1455de483253SVishal Kulkarni 
1456de483253SVishal Kulkarni 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1457de483253SVishal Kulkarni 			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1458de483253SVishal Kulkarni 			V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
1459de483253SVishal Kulkarni 		if (cong == 0)
1460de483253SVishal Kulkarni 			val = 1 << 19;
1461de483253SVishal Kulkarni 		else {
1462de483253SVishal Kulkarni 			val = 2 << 19;
1463de483253SVishal Kulkarni 			for (i = 0; i < 4; i++) {
1464de483253SVishal Kulkarni 				if (cong & (1 << i))
1465de483253SVishal Kulkarni 					val |= 1 << (i << 2);
1466de483253SVishal Kulkarni 			}
1467de483253SVishal Kulkarni 		}
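		/*
		 * As we read the firmware interface: the mode field at bit
		 * 19 selects per-queue congestion tracking (1) when cong is
		 * 0, else channel-map mode (2), with each congested channel
		 * i setting bit i * 4 of the map.
		 */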
1468de483253SVishal Kulkarni 
1469de483253SVishal Kulkarni 		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1470de483253SVishal Kulkarni 		if (rc != 0) {
1471de483253SVishal Kulkarni 			/* report error but carry on */
1472de483253SVishal Kulkarni 			cxgb_printf(sc->dip, CE_WARN,
1473de483253SVishal Kulkarni 			    "failed to set congestion manager context for "
1474de483253SVishal Kulkarni 			    "ingress queue %d: %d", iq->cntxt_id, rc);
1475de483253SVishal Kulkarni 		}
1476de483253SVishal Kulkarni 	}
1477de483253SVishal Kulkarni 
147856b2bdd1SGireesh Nagabhushana 	/* Enable IQ interrupts */
147956b2bdd1SGireesh Nagabhushana 	iq->state = IQS_IDLE;
148056b2bdd1SGireesh Nagabhushana 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
148156b2bdd1SGireesh Nagabhushana 	    V_INGRESSQID(iq->cntxt_id));
148256b2bdd1SGireesh Nagabhushana 
148356b2bdd1SGireesh Nagabhushana 	return (0);
148456b2bdd1SGireesh Nagabhushana }
148556b2bdd1SGireesh Nagabhushana 
148656b2bdd1SGireesh Nagabhushana static int
148756b2bdd1SGireesh Nagabhushana free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
148856b2bdd1SGireesh Nagabhushana {
148956b2bdd1SGireesh Nagabhushana 	int rc;
149056b2bdd1SGireesh Nagabhushana 
149156b2bdd1SGireesh Nagabhushana 	if (iq != NULL) {
149289f249c9SRobert Mustacchi 		struct adapter *sc = iq->adapter;
149389f249c9SRobert Mustacchi 		dev_info_t *dip;
149489f249c9SRobert Mustacchi 
149589f249c9SRobert Mustacchi 		dip = pi ? pi->dip : sc->dip;
149656b2bdd1SGireesh Nagabhushana 		if (iq->flags & IQ_ALLOCATED) {
149756b2bdd1SGireesh Nagabhushana 			rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
149856b2bdd1SGireesh Nagabhushana 			    FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id,
149956b2bdd1SGireesh Nagabhushana 			    fl ? fl->cntxt_id : 0xffff, 0xffff);
150056b2bdd1SGireesh Nagabhushana 			if (rc != 0) {
150156b2bdd1SGireesh Nagabhushana 				cxgb_printf(dip, CE_WARN,
150256b2bdd1SGireesh Nagabhushana 				    "failed to free queue %p: %d", iq, rc);
150356b2bdd1SGireesh Nagabhushana 				return (rc);
150456b2bdd1SGireesh Nagabhushana 			}
15053dde7c95SVishal Kulkarni 			mutex_destroy(&iq->lock);
150656b2bdd1SGireesh Nagabhushana 			iq->flags &= ~IQ_ALLOCATED;
150756b2bdd1SGireesh Nagabhushana 		}
150856b2bdd1SGireesh Nagabhushana 
150956b2bdd1SGireesh Nagabhushana 		if (iq->desc != NULL) {
151056b2bdd1SGireesh Nagabhushana 			(void) free_desc_ring(&iq->dhdl, &iq->ahdl);
151156b2bdd1SGireesh Nagabhushana 			iq->desc = NULL;
151256b2bdd1SGireesh Nagabhushana 		}
151356b2bdd1SGireesh Nagabhushana 
151456b2bdd1SGireesh Nagabhushana 		bzero(iq, sizeof (*iq));
151556b2bdd1SGireesh Nagabhushana 	}
151656b2bdd1SGireesh Nagabhushana 
151756b2bdd1SGireesh Nagabhushana 	if (fl != NULL) {
151856b2bdd1SGireesh Nagabhushana 		if (fl->sdesc != NULL) {
151956b2bdd1SGireesh Nagabhushana 			FL_LOCK(fl);
152056b2bdd1SGireesh Nagabhushana 			free_fl_bufs(fl);
152156b2bdd1SGireesh Nagabhushana 			FL_UNLOCK(fl);
152256b2bdd1SGireesh Nagabhushana 
152356b2bdd1SGireesh Nagabhushana 			kmem_free(fl->sdesc, sizeof (struct fl_sdesc) *
152456b2bdd1SGireesh Nagabhushana 			    fl->cap);
152556b2bdd1SGireesh Nagabhushana 			fl->sdesc = NULL;
152656b2bdd1SGireesh Nagabhushana 		}
152756b2bdd1SGireesh Nagabhushana 
152856b2bdd1SGireesh Nagabhushana 		if (fl->desc != NULL) {
152956b2bdd1SGireesh Nagabhushana 			(void) free_desc_ring(&fl->dhdl, &fl->ahdl);
153056b2bdd1SGireesh Nagabhushana 			fl->desc = NULL;
153156b2bdd1SGireesh Nagabhushana 		}
153256b2bdd1SGireesh Nagabhushana 
153356b2bdd1SGireesh Nagabhushana 		if (fl->flags & FL_MTX) {
153456b2bdd1SGireesh Nagabhushana 			mutex_destroy(&fl->lock);
153556b2bdd1SGireesh Nagabhushana 			fl->flags &= ~FL_MTX;
153656b2bdd1SGireesh Nagabhushana 		}
153756b2bdd1SGireesh Nagabhushana 
153856b2bdd1SGireesh Nagabhushana 		bzero(fl, sizeof (struct sge_fl));
153956b2bdd1SGireesh Nagabhushana 	}
154056b2bdd1SGireesh Nagabhushana 
154156b2bdd1SGireesh Nagabhushana 	return (0);
154256b2bdd1SGireesh Nagabhushana }
154356b2bdd1SGireesh Nagabhushana 
154456b2bdd1SGireesh Nagabhushana static int
154556b2bdd1SGireesh Nagabhushana alloc_fwq(struct adapter *sc)
154656b2bdd1SGireesh Nagabhushana {
154756b2bdd1SGireesh Nagabhushana 	int rc, intr_idx;
154856b2bdd1SGireesh Nagabhushana 	struct sge_iq *fwq = &sc->sge.fwq;
154956b2bdd1SGireesh Nagabhushana 
155056b2bdd1SGireesh Nagabhushana 	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE);
155156b2bdd1SGireesh Nagabhushana 	fwq->flags |= IQ_INTR;	/* always */
155256b2bdd1SGireesh Nagabhushana 	intr_idx = sc->intr_count > 1 ? 1 : 0;
155356b2bdd1SGireesh Nagabhushana 	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
155456b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
155556b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
155656b2bdd1SGireesh Nagabhushana 		    "failed to create firmware event queue: %d.", rc);
155756b2bdd1SGireesh Nagabhushana 		return (rc);
155856b2bdd1SGireesh Nagabhushana 	}
155956b2bdd1SGireesh Nagabhushana 
156056b2bdd1SGireesh Nagabhushana 	return (0);
156156b2bdd1SGireesh Nagabhushana }
156256b2bdd1SGireesh Nagabhushana 
156356b2bdd1SGireesh Nagabhushana static int
156456b2bdd1SGireesh Nagabhushana free_fwq(struct adapter *sc)
156556b2bdd1SGireesh Nagabhushana {
156656b2bdd1SGireesh Nagabhushana 
156756b2bdd1SGireesh Nagabhushana 	return (free_iq_fl(NULL, &sc->sge.fwq, NULL));
156856b2bdd1SGireesh Nagabhushana }
156956b2bdd1SGireesh Nagabhushana 
15703dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
157156b2bdd1SGireesh Nagabhushana static int
157256b2bdd1SGireesh Nagabhushana alloc_mgmtq(struct adapter *sc)
157356b2bdd1SGireesh Nagabhushana {
157456b2bdd1SGireesh Nagabhushana 	int rc;
157556b2bdd1SGireesh Nagabhushana 	struct sge_wrq *mgmtq = &sc->sge.mgmtq;
157656b2bdd1SGireesh Nagabhushana 
1577de483253SVishal Kulkarni 	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
157856b2bdd1SGireesh Nagabhushana 	    sc->sge.fwq.cntxt_id);
157956b2bdd1SGireesh Nagabhushana 	rc = alloc_wrq(sc, NULL, mgmtq, 0);
158056b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
158156b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
158256b2bdd1SGireesh Nagabhushana 		    "failed to create management queue: %d\n", rc);
158356b2bdd1SGireesh Nagabhushana 		return (rc);
158456b2bdd1SGireesh Nagabhushana 	}
158556b2bdd1SGireesh Nagabhushana 
158656b2bdd1SGireesh Nagabhushana 	return (0);
158756b2bdd1SGireesh Nagabhushana }
15883dde7c95SVishal Kulkarni #endif
158956b2bdd1SGireesh Nagabhushana 
159056b2bdd1SGireesh Nagabhushana static int
159156b2bdd1SGireesh Nagabhushana alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int i)
159256b2bdd1SGireesh Nagabhushana {
159356b2bdd1SGireesh Nagabhushana 	int rc;
159456b2bdd1SGireesh Nagabhushana 
159556b2bdd1SGireesh Nagabhushana 	rxq->port = pi;
15967e6ad469SVishal Kulkarni 	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx,
15977e6ad469SVishal Kulkarni 			 t4_get_tp_ch_map(pi->adapter, pi->tx_chan));
159856b2bdd1SGireesh Nagabhushana 	if (rc != 0)
159956b2bdd1SGireesh Nagabhushana 		return (rc);
160056b2bdd1SGireesh Nagabhushana 
160156b2bdd1SGireesh Nagabhushana 	rxq->ksp = setup_rxq_kstats(pi, rxq, i);
160256b2bdd1SGireesh Nagabhushana 
160356b2bdd1SGireesh Nagabhushana 	return (rc);
160456b2bdd1SGireesh Nagabhushana }
160556b2bdd1SGireesh Nagabhushana 
160656b2bdd1SGireesh Nagabhushana static int
160756b2bdd1SGireesh Nagabhushana free_rxq(struct port_info *pi, struct sge_rxq *rxq)
160856b2bdd1SGireesh Nagabhushana {
160956b2bdd1SGireesh Nagabhushana 	int rc;
161056b2bdd1SGireesh Nagabhushana 
161156b2bdd1SGireesh Nagabhushana 	if (rxq->ksp != NULL) {
161256b2bdd1SGireesh Nagabhushana 		kstat_delete(rxq->ksp);
161356b2bdd1SGireesh Nagabhushana 		rxq->ksp = NULL;
161456b2bdd1SGireesh Nagabhushana 	}
161556b2bdd1SGireesh Nagabhushana 
161656b2bdd1SGireesh Nagabhushana 	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
161756b2bdd1SGireesh Nagabhushana 	if (rc == 0)
161856b2bdd1SGireesh Nagabhushana 		bzero(&rxq->fl, sizeof (*rxq) - offsetof(struct sge_rxq, fl));
161956b2bdd1SGireesh Nagabhushana 
162056b2bdd1SGireesh Nagabhushana 	return (rc);
162156b2bdd1SGireesh Nagabhushana }
162256b2bdd1SGireesh Nagabhushana 
16233dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
162456b2bdd1SGireesh Nagabhushana static int
162556b2bdd1SGireesh Nagabhushana alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
162656b2bdd1SGireesh Nagabhushana 	int intr_idx)
162756b2bdd1SGireesh Nagabhushana {
162856b2bdd1SGireesh Nagabhushana 	int rc;
162956b2bdd1SGireesh Nagabhushana 
163056b2bdd1SGireesh Nagabhushana 	rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
16317e6ad469SVishal Kulkarni 	    t4_get_tp_ch_map(pi->adapter, pi->tx_chan));
163256b2bdd1SGireesh Nagabhushana 	if (rc != 0)
163356b2bdd1SGireesh Nagabhushana 		return (rc);
163456b2bdd1SGireesh Nagabhushana 
163556b2bdd1SGireesh Nagabhushana 	return (rc);
163656b2bdd1SGireesh Nagabhushana }
163756b2bdd1SGireesh Nagabhushana 
163856b2bdd1SGireesh Nagabhushana static int
163956b2bdd1SGireesh Nagabhushana free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
164056b2bdd1SGireesh Nagabhushana {
164156b2bdd1SGireesh Nagabhushana 	int rc;
164256b2bdd1SGireesh Nagabhushana 
164356b2bdd1SGireesh Nagabhushana 	rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
164456b2bdd1SGireesh Nagabhushana 	if (rc == 0)
164556b2bdd1SGireesh Nagabhushana 		bzero(&ofld_rxq->fl, sizeof (*ofld_rxq) -
164656b2bdd1SGireesh Nagabhushana 		    offsetof(struct sge_ofld_rxq, fl));
164756b2bdd1SGireesh Nagabhushana 
164856b2bdd1SGireesh Nagabhushana 	return (rc);
164956b2bdd1SGireesh Nagabhushana }
165056b2bdd1SGireesh Nagabhushana #endif
165156b2bdd1SGireesh Nagabhushana 
165256b2bdd1SGireesh Nagabhushana static int
165356b2bdd1SGireesh Nagabhushana ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
165456b2bdd1SGireesh Nagabhushana {
165556b2bdd1SGireesh Nagabhushana 	int rc, cntxt_id;
165656b2bdd1SGireesh Nagabhushana 	struct fw_eq_ctrl_cmd c;
165756b2bdd1SGireesh Nagabhushana 
165856b2bdd1SGireesh Nagabhushana 	bzero(&c, sizeof (c));
165956b2bdd1SGireesh Nagabhushana 
166056b2bdd1SGireesh Nagabhushana 	c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
166156b2bdd1SGireesh Nagabhushana 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
166256b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_CTRL_CMD_VFN(0));
166356b2bdd1SGireesh Nagabhushana 	c.alloc_to_len16 = BE_32(F_FW_EQ_CTRL_CMD_ALLOC |
166456b2bdd1SGireesh Nagabhushana 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
166556b2bdd1SGireesh Nagabhushana 	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* TODO */
166656b2bdd1SGireesh Nagabhushana 	c.physeqid_pkd = BE_32(0);
166756b2bdd1SGireesh Nagabhushana 	c.fetchszm_to_iqid =
166856b2bdd1SGireesh Nagabhushana 	    BE_32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
166956b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
167056b2bdd1SGireesh Nagabhushana 	    F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
167156b2bdd1SGireesh Nagabhushana 	c.dcaen_to_eqsize =
167256b2bdd1SGireesh Nagabhushana 	    BE_32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
167356b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
167456b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
167556b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
167656b2bdd1SGireesh Nagabhushana 	c.eqaddr = BE_64(eq->ba);
167756b2bdd1SGireesh Nagabhushana 
167856b2bdd1SGireesh Nagabhushana 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
167956b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
168056b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
168156b2bdd1SGireesh Nagabhushana 		    "failed to create control queue %d: %d", eq->tx_chan, rc);
168256b2bdd1SGireesh Nagabhushana 		return (rc);
168356b2bdd1SGireesh Nagabhushana 	}
168456b2bdd1SGireesh Nagabhushana 	eq->flags |= EQ_ALLOCATED;
168556b2bdd1SGireesh Nagabhushana 
168656b2bdd1SGireesh Nagabhushana 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(BE_32(c.cmpliqid_eqid));
168756b2bdd1SGireesh Nagabhushana 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1688*77ac03cbSRahul Lakkireddy 	if (cntxt_id >= sc->sge.eqmap_sz)
168956b2bdd1SGireesh Nagabhushana 		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1690*77ac03cbSRahul Lakkireddy 		      cntxt_id, sc->sge.eqmap_sz - 1);
169156b2bdd1SGireesh Nagabhushana 	sc->sge.eqmap[cntxt_id] = eq;
169256b2bdd1SGireesh Nagabhushana 
169356b2bdd1SGireesh Nagabhushana 	return (rc);
169456b2bdd1SGireesh Nagabhushana }
169556b2bdd1SGireesh Nagabhushana 
169656b2bdd1SGireesh Nagabhushana static int
169756b2bdd1SGireesh Nagabhushana eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
169856b2bdd1SGireesh Nagabhushana {
169956b2bdd1SGireesh Nagabhushana 	int rc, cntxt_id;
170056b2bdd1SGireesh Nagabhushana 	struct fw_eq_eth_cmd c;
170156b2bdd1SGireesh Nagabhushana 
170256b2bdd1SGireesh Nagabhushana 	bzero(&c, sizeof (c));
170356b2bdd1SGireesh Nagabhushana 
170456b2bdd1SGireesh Nagabhushana 	c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
170556b2bdd1SGireesh Nagabhushana 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
170656b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_VFN(0));
170756b2bdd1SGireesh Nagabhushana 	c.alloc_to_len16 = BE_32(F_FW_EQ_ETH_CMD_ALLOC |
170856b2bdd1SGireesh Nagabhushana 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
170906b05760SVishal Kulkarni 	c.autoequiqe_to_viid = BE_32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
171006b05760SVishal Kulkarni 	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid));
171156b2bdd1SGireesh Nagabhushana 	c.fetchszm_to_iqid =
171256b2bdd1SGireesh Nagabhushana 	    BE_32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
171356b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
171456b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
171556b2bdd1SGireesh Nagabhushana 	c.dcaen_to_eqsize = BE_32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
171656b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
171756b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
171856b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
171956b2bdd1SGireesh Nagabhushana 	c.eqaddr = BE_64(eq->ba);
172056b2bdd1SGireesh Nagabhushana 
172156b2bdd1SGireesh Nagabhushana 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
172256b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
172356b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN,
172456b2bdd1SGireesh Nagabhushana 		    "failed to create Ethernet egress queue: %d", rc);
172556b2bdd1SGireesh Nagabhushana 		return (rc);
172656b2bdd1SGireesh Nagabhushana 	}
172756b2bdd1SGireesh Nagabhushana 	eq->flags |= EQ_ALLOCATED;
172856b2bdd1SGireesh Nagabhushana 
172956b2bdd1SGireesh Nagabhushana 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(BE_32(c.eqid_pkd));
173056b2bdd1SGireesh Nagabhushana 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1731*77ac03cbSRahul Lakkireddy 	if (cntxt_id >= sc->sge.eqmap_sz)
173256b2bdd1SGireesh Nagabhushana 		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1733*77ac03cbSRahul Lakkireddy 		      cntxt_id, sc->sge.eqmap_sz - 1);
173456b2bdd1SGireesh Nagabhushana 	sc->sge.eqmap[cntxt_id] = eq;
173556b2bdd1SGireesh Nagabhushana 
173656b2bdd1SGireesh Nagabhushana 	return (rc);
173756b2bdd1SGireesh Nagabhushana }
173856b2bdd1SGireesh Nagabhushana 
17393dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
174056b2bdd1SGireesh Nagabhushana static int
174156b2bdd1SGireesh Nagabhushana ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
174256b2bdd1SGireesh Nagabhushana {
174356b2bdd1SGireesh Nagabhushana 	int rc, cntxt_id;
174456b2bdd1SGireesh Nagabhushana 	struct fw_eq_ofld_cmd c;
174556b2bdd1SGireesh Nagabhushana 
174656b2bdd1SGireesh Nagabhushana 	bzero(&c, sizeof (c));
174756b2bdd1SGireesh Nagabhushana 
174856b2bdd1SGireesh Nagabhushana 	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
174956b2bdd1SGireesh Nagabhushana 	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
175056b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_OFLD_CMD_VFN(0));
175156b2bdd1SGireesh Nagabhushana 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
175256b2bdd1SGireesh Nagabhushana 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
175356b2bdd1SGireesh Nagabhushana 	c.fetchszm_to_iqid =
175456b2bdd1SGireesh Nagabhushana 	    htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
175556b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
175656b2bdd1SGireesh Nagabhushana 	    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
175756b2bdd1SGireesh Nagabhushana 	c.dcaen_to_eqsize =
175856b2bdd1SGireesh Nagabhushana 	    BE_32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
175956b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
176056b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
176156b2bdd1SGireesh Nagabhushana 	    V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
176256b2bdd1SGireesh Nagabhushana 	c.eqaddr = BE_64(eq->ba);
176356b2bdd1SGireesh Nagabhushana 
176456b2bdd1SGireesh Nagabhushana 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof (c), &c);
176556b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
176656b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN,
176756b2bdd1SGireesh Nagabhushana 		    "failed to create egress queue for TCP offload: %d", rc);
176856b2bdd1SGireesh Nagabhushana 		return (rc);
176956b2bdd1SGireesh Nagabhushana 	}
177056b2bdd1SGireesh Nagabhushana 	eq->flags |= EQ_ALLOCATED;
177156b2bdd1SGireesh Nagabhushana 
177256b2bdd1SGireesh Nagabhushana 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(BE_32(c.eqid_pkd));
177356b2bdd1SGireesh Nagabhushana 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
1774*77ac03cbSRahul Lakkireddy 	if (cntxt_id >= sc->sge.eqmap_sz)
177556b2bdd1SGireesh Nagabhushana 		panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
1776*77ac03cbSRahul Lakkireddy 		      cntxt_id, sc->sge.eqmap_sz - 1);
177756b2bdd1SGireesh Nagabhushana 	sc->sge.eqmap[cntxt_id] = eq;
177856b2bdd1SGireesh Nagabhushana 
177956b2bdd1SGireesh Nagabhushana 	return (rc);
178056b2bdd1SGireesh Nagabhushana }
178156b2bdd1SGireesh Nagabhushana #endif
178256b2bdd1SGireesh Nagabhushana 
178356b2bdd1SGireesh Nagabhushana static int
178456b2bdd1SGireesh Nagabhushana alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
178556b2bdd1SGireesh Nagabhushana {
178656b2bdd1SGireesh Nagabhushana 	int rc;
178756b2bdd1SGireesh Nagabhushana 	size_t len;
178856b2bdd1SGireesh Nagabhushana 
178956b2bdd1SGireesh Nagabhushana 	mutex_init(&eq->lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(sc->intr_pri));
179056b2bdd1SGireesh Nagabhushana 	eq->flags |= EQ_MTX;
179156b2bdd1SGireesh Nagabhushana 
179256b2bdd1SGireesh Nagabhushana 	len = eq->qsize * EQ_ESIZE;
179356b2bdd1SGireesh Nagabhushana 	rc = alloc_desc_ring(sc, len, DDI_DMA_WRITE, &eq->desc_dhdl,
179456b2bdd1SGireesh Nagabhushana 	    &eq->desc_ahdl, &eq->ba, (caddr_t *)&eq->desc);
179556b2bdd1SGireesh Nagabhushana 	if (rc != 0)
179656b2bdd1SGireesh Nagabhushana 		return (rc);
179756b2bdd1SGireesh Nagabhushana 
17983dde7c95SVishal Kulkarni 	eq->cap = eq->qsize - sc->sge.stat_len / EQ_ESIZE;
179956b2bdd1SGireesh Nagabhushana 	eq->spg = (void *)&eq->desc[eq->cap];
180056b2bdd1SGireesh Nagabhushana 	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
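	/*
	 * Keeping at most cap - 1 descriptors outstanding makes pidx == cidx
	 * unambiguous: it can only mean "ring empty", never "ring full".
	 */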
180156b2bdd1SGireesh Nagabhushana 	eq->pidx = eq->cidx = 0;
1802de483253SVishal Kulkarni 	eq->doorbells = sc->doorbells;
180356b2bdd1SGireesh Nagabhushana 
180456b2bdd1SGireesh Nagabhushana 	switch (eq->flags & EQ_TYPEMASK) {
180556b2bdd1SGireesh Nagabhushana 	case EQ_CTRL:
180656b2bdd1SGireesh Nagabhushana 		rc = ctrl_eq_alloc(sc, eq);
180756b2bdd1SGireesh Nagabhushana 		break;
180856b2bdd1SGireesh Nagabhushana 
180956b2bdd1SGireesh Nagabhushana 	case EQ_ETH:
181056b2bdd1SGireesh Nagabhushana 		rc = eth_eq_alloc(sc, pi, eq);
181156b2bdd1SGireesh Nagabhushana 		break;
181256b2bdd1SGireesh Nagabhushana 
18133dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
181456b2bdd1SGireesh Nagabhushana 	case EQ_OFLD:
181556b2bdd1SGireesh Nagabhushana 		rc = ofld_eq_alloc(sc, pi, eq);
181656b2bdd1SGireesh Nagabhushana 		break;
181756b2bdd1SGireesh Nagabhushana #endif
181856b2bdd1SGireesh Nagabhushana 
181956b2bdd1SGireesh Nagabhushana 	default:
182056b2bdd1SGireesh Nagabhushana 		panic("%s: invalid eq type %d.", __func__,
182156b2bdd1SGireesh Nagabhushana 		    eq->flags & EQ_TYPEMASK);
182256b2bdd1SGireesh Nagabhushana 	}
1823de483253SVishal Kulkarni 
1824de483253SVishal Kulkarni 	if (eq->doorbells &
1825de483253SVishal Kulkarni 		(DOORBELL_UDB | DOORBELL_UDBWC | DOORBELL_WCWR)) {
1826de483253SVishal Kulkarni 		uint32_t s_qpp = sc->sge.s_qpp;
1827de483253SVishal Kulkarni 		uint32_t mask = (1 << s_qpp) - 1;
1828de483253SVishal Kulkarni 		volatile uint8_t *udb;
1829de483253SVishal Kulkarni 
1830de483253SVishal Kulkarni 		udb = (volatile uint8_t *)sc->reg1p + UDBS_DB_OFFSET;
1831de483253SVishal Kulkarni 		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;   /* pg offset */
1832de483253SVishal Kulkarni 		eq->udb_qid = eq->cntxt_id & mask;              /* id in page */
1833de483253SVishal Kulkarni 		if (eq->udb_qid > PAGE_SIZE / UDBS_SEG_SIZE)
1834de483253SVishal Kulkarni 			eq->doorbells &= ~DOORBELL_WCWR;
1835de483253SVishal Kulkarni 		else {
1836de483253SVishal Kulkarni 			udb += eq->udb_qid << UDBS_SEG_SHIFT;   /* seg offset */
1837de483253SVishal Kulkarni 			eq->udb_qid = 0;
1838de483253SVishal Kulkarni 		}
1839de483253SVishal Kulkarni 		eq->udb = (volatile void *)udb;
1840de483253SVishal Kulkarni 	}
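	/*
	 * Summary of the doorbell math above: each user-doorbell page holds
	 * 2^s_qpp queues, so cntxt_id >> s_qpp selects the page and the low
	 * s_qpp bits select the queue within it.  When the queue's 128-byte
	 * doorbell segment lies within the page, udb points straight at the
	 * segment (qid 0); otherwise write-combined doorbells are disabled.
	 */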
1841de483253SVishal Kulkarni 
184256b2bdd1SGireesh Nagabhushana 	if (rc != 0) {
184356b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
184456b2bdd1SGireesh Nagabhushana 		    "failed to allocate egress queue(%d): %d",
184556b2bdd1SGireesh Nagabhushana 		    eq->flags & EQ_TYPEMASK, rc);
184656b2bdd1SGireesh Nagabhushana 	}
184756b2bdd1SGireesh Nagabhushana 
184856b2bdd1SGireesh Nagabhushana 	return (rc);
184956b2bdd1SGireesh Nagabhushana }
185056b2bdd1SGireesh Nagabhushana 
185156b2bdd1SGireesh Nagabhushana static int
185256b2bdd1SGireesh Nagabhushana free_eq(struct adapter *sc, struct sge_eq *eq)
185356b2bdd1SGireesh Nagabhushana {
185456b2bdd1SGireesh Nagabhushana 	int rc;
185556b2bdd1SGireesh Nagabhushana 
185656b2bdd1SGireesh Nagabhushana 	if (eq->flags & EQ_ALLOCATED) {
185756b2bdd1SGireesh Nagabhushana 		switch (eq->flags & EQ_TYPEMASK) {
185856b2bdd1SGireesh Nagabhushana 		case EQ_CTRL:
185956b2bdd1SGireesh Nagabhushana 			rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0,
186056b2bdd1SGireesh Nagabhushana 			    eq->cntxt_id);
186156b2bdd1SGireesh Nagabhushana 			break;
186256b2bdd1SGireesh Nagabhushana 
186356b2bdd1SGireesh Nagabhushana 		case EQ_ETH:
186456b2bdd1SGireesh Nagabhushana 			rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0,
186556b2bdd1SGireesh Nagabhushana 			    eq->cntxt_id);
186656b2bdd1SGireesh Nagabhushana 			break;
186756b2bdd1SGireesh Nagabhushana 
18683dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
186956b2bdd1SGireesh Nagabhushana 		case EQ_OFLD:
187056b2bdd1SGireesh Nagabhushana 			rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0,
187156b2bdd1SGireesh Nagabhushana 			    eq->cntxt_id);
187256b2bdd1SGireesh Nagabhushana 			break;
187356b2bdd1SGireesh Nagabhushana #endif
187456b2bdd1SGireesh Nagabhushana 
187556b2bdd1SGireesh Nagabhushana 		default:
187656b2bdd1SGireesh Nagabhushana 			panic("%s: invalid eq type %d.", __func__,
187756b2bdd1SGireesh Nagabhushana 			    eq->flags & EQ_TYPEMASK);
187856b2bdd1SGireesh Nagabhushana 		}
187956b2bdd1SGireesh Nagabhushana 		if (rc != 0) {
188056b2bdd1SGireesh Nagabhushana 			cxgb_printf(sc->dip, CE_WARN,
188156b2bdd1SGireesh Nagabhushana 			    "failed to free egress queue (%d): %d",
188256b2bdd1SGireesh Nagabhushana 			    eq->flags & EQ_TYPEMASK, rc);
188356b2bdd1SGireesh Nagabhushana 			return (rc);
188456b2bdd1SGireesh Nagabhushana 		}
188556b2bdd1SGireesh Nagabhushana 		eq->flags &= ~EQ_ALLOCATED;
188656b2bdd1SGireesh Nagabhushana 	}
188756b2bdd1SGireesh Nagabhushana 
188856b2bdd1SGireesh Nagabhushana 	if (eq->desc != NULL) {
188956b2bdd1SGireesh Nagabhushana 		(void) free_desc_ring(&eq->desc_dhdl, &eq->desc_ahdl);
189056b2bdd1SGireesh Nagabhushana 		eq->desc = NULL;
189156b2bdd1SGireesh Nagabhushana 	}
189256b2bdd1SGireesh Nagabhushana 
189356b2bdd1SGireesh Nagabhushana 	if (eq->flags & EQ_MTX)
189456b2bdd1SGireesh Nagabhushana 		mutex_destroy(&eq->lock);
189556b2bdd1SGireesh Nagabhushana 
189656b2bdd1SGireesh Nagabhushana 	bzero(eq, sizeof (*eq));
189756b2bdd1SGireesh Nagabhushana 	return (0);
189856b2bdd1SGireesh Nagabhushana }
189956b2bdd1SGireesh Nagabhushana 
19003dde7c95SVishal Kulkarni #ifdef TCP_OFFLOAD_ENABLE
190156b2bdd1SGireesh Nagabhushana /* ARGSUSED */
190256b2bdd1SGireesh Nagabhushana static int
190356b2bdd1SGireesh Nagabhushana alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
190456b2bdd1SGireesh Nagabhushana     int idx)
190556b2bdd1SGireesh Nagabhushana {
190656b2bdd1SGireesh Nagabhushana 	int rc;
190756b2bdd1SGireesh Nagabhushana 
190856b2bdd1SGireesh Nagabhushana 	rc = alloc_eq(sc, pi, &wrq->eq);
190956b2bdd1SGireesh Nagabhushana 	if (rc != 0)
191056b2bdd1SGireesh Nagabhushana 		return (rc);
191156b2bdd1SGireesh Nagabhushana 
191256b2bdd1SGireesh Nagabhushana 	wrq->adapter = sc;
191356b2bdd1SGireesh Nagabhushana 	wrq->wr_list.head = NULL;
191456b2bdd1SGireesh Nagabhushana 	wrq->wr_list.tail = NULL;
191556b2bdd1SGireesh Nagabhushana 
191656b2bdd1SGireesh Nagabhushana 	/*
191756b2bdd1SGireesh Nagabhushana 	 * TODO: use idx to figure out what kind of wrq this is and install
191856b2bdd1SGireesh Nagabhushana 	 * useful kstats for it.
191956b2bdd1SGireesh Nagabhushana 	 */
192056b2bdd1SGireesh Nagabhushana 
192156b2bdd1SGireesh Nagabhushana 	return (rc);
192256b2bdd1SGireesh Nagabhushana }
192356b2bdd1SGireesh Nagabhushana 
192456b2bdd1SGireesh Nagabhushana static int
192556b2bdd1SGireesh Nagabhushana free_wrq(struct adapter *sc, struct sge_wrq *wrq)
192656b2bdd1SGireesh Nagabhushana {
192756b2bdd1SGireesh Nagabhushana 	int rc;
192856b2bdd1SGireesh Nagabhushana 
192956b2bdd1SGireesh Nagabhushana 	rc = free_eq(sc, &wrq->eq);
193056b2bdd1SGireesh Nagabhushana 	if (rc != 0)
193156b2bdd1SGireesh Nagabhushana 		return (rc);
193256b2bdd1SGireesh Nagabhushana 
193356b2bdd1SGireesh Nagabhushana 	bzero(wrq, sizeof (*wrq));
193456b2bdd1SGireesh Nagabhushana 	return (0);
193556b2bdd1SGireesh Nagabhushana }
19363dde7c95SVishal Kulkarni #endif
193756b2bdd1SGireesh Nagabhushana 
193856b2bdd1SGireesh Nagabhushana static int
193956b2bdd1SGireesh Nagabhushana alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx)
194056b2bdd1SGireesh Nagabhushana {
194156b2bdd1SGireesh Nagabhushana 	int rc, i;
194256b2bdd1SGireesh Nagabhushana 	struct adapter *sc = pi->adapter;
194356b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
194456b2bdd1SGireesh Nagabhushana 
194556b2bdd1SGireesh Nagabhushana 	rc = alloc_eq(sc, pi, eq);
194656b2bdd1SGireesh Nagabhushana 	if (rc != 0)
194756b2bdd1SGireesh Nagabhushana 		return (rc);
194856b2bdd1SGireesh Nagabhushana 
194956b2bdd1SGireesh Nagabhushana 	txq->port = pi;
195056b2bdd1SGireesh Nagabhushana 	txq->sdesc = kmem_zalloc(sizeof (struct tx_sdesc) * eq->cap, KM_SLEEP);
195156b2bdd1SGireesh Nagabhushana 	txq->txb_size = eq->qsize * tx_copy_threshold;
195256b2bdd1SGireesh Nagabhushana 	rc = alloc_tx_copybuffer(sc, txq->txb_size, &txq->txb_dhdl,
195356b2bdd1SGireesh Nagabhushana 	    &txq->txb_ahdl, &txq->txb_ba, &txq->txb_va);
195456b2bdd1SGireesh Nagabhushana 	if (rc == 0)
195556b2bdd1SGireesh Nagabhushana 		txq->txb_avail = txq->txb_size;
195656b2bdd1SGireesh Nagabhushana 	else
195756b2bdd1SGireesh Nagabhushana 		txq->txb_avail = txq->txb_size = 0;
195856b2bdd1SGireesh Nagabhushana 
195956b2bdd1SGireesh Nagabhushana 	/*
196056b2bdd1SGireesh Nagabhushana 	 * TODO: is this too low?  Worst case would need around 4 times qsize
196156b2bdd1SGireesh Nagabhushana 	 * (all tx descriptors filled to the brim with SGLs, with each entry in
196256b2bdd1SGireesh Nagabhushana 	 * the SGL coming from a distinct DMA handle).  Increase tx_dhdl_total
196356b2bdd1SGireesh Nagabhushana 	 * if you see too many dma_hdl_failed.
196456b2bdd1SGireesh Nagabhushana 	 */
196556b2bdd1SGireesh Nagabhushana 	txq->tx_dhdl_total = eq->qsize * 2;
196656b2bdd1SGireesh Nagabhushana 	txq->tx_dhdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
196756b2bdd1SGireesh Nagabhushana 	    txq->tx_dhdl_total, KM_SLEEP);
196856b2bdd1SGireesh Nagabhushana 	for (i = 0; i < txq->tx_dhdl_total; i++) {
196956b2bdd1SGireesh Nagabhushana 		rc = ddi_dma_alloc_handle(sc->dip, &sc->sge.dma_attr_tx,
197056b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SLEEP, 0, &txq->tx_dhdl[i]);
197156b2bdd1SGireesh Nagabhushana 		if (rc != DDI_SUCCESS) {
197256b2bdd1SGireesh Nagabhushana 			cxgb_printf(sc->dip, CE_WARN,
197356b2bdd1SGireesh Nagabhushana 			    "%s: failed to allocate DMA handle (%d)",
197456b2bdd1SGireesh Nagabhushana 			    __func__, rc);
197556b2bdd1SGireesh Nagabhushana 			return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EINVAL);
197656b2bdd1SGireesh Nagabhushana 		}
197756b2bdd1SGireesh Nagabhushana 		txq->tx_dhdl_avail++;
197856b2bdd1SGireesh Nagabhushana 	}
197956b2bdd1SGireesh Nagabhushana 
198056b2bdd1SGireesh Nagabhushana 	txq->ksp = setup_txq_kstats(pi, txq, idx);
198156b2bdd1SGireesh Nagabhushana 
198256b2bdd1SGireesh Nagabhushana 	return (rc);
198356b2bdd1SGireesh Nagabhushana }
198456b2bdd1SGireesh Nagabhushana 
198556b2bdd1SGireesh Nagabhushana static int
198656b2bdd1SGireesh Nagabhushana free_txq(struct port_info *pi, struct sge_txq *txq)
198756b2bdd1SGireesh Nagabhushana {
198856b2bdd1SGireesh Nagabhushana 	int i;
198956b2bdd1SGireesh Nagabhushana 	struct adapter *sc = pi->adapter;
199056b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
199156b2bdd1SGireesh Nagabhushana 
199256b2bdd1SGireesh Nagabhushana 	if (txq->ksp != NULL) {
199356b2bdd1SGireesh Nagabhushana 		kstat_delete(txq->ksp);
199456b2bdd1SGireesh Nagabhushana 		txq->ksp = NULL;
199556b2bdd1SGireesh Nagabhushana 	}
199656b2bdd1SGireesh Nagabhushana 
199756b2bdd1SGireesh Nagabhushana 	if (txq->txb_va != NULL) {
199856b2bdd1SGireesh Nagabhushana 		(void) free_desc_ring(&txq->txb_dhdl, &txq->txb_ahdl);
199956b2bdd1SGireesh Nagabhushana 		txq->txb_va = NULL;
200056b2bdd1SGireesh Nagabhushana 	}
200156b2bdd1SGireesh Nagabhushana 
200256b2bdd1SGireesh Nagabhushana 	if (txq->sdesc != NULL) {
200356b2bdd1SGireesh Nagabhushana 		struct tx_sdesc *sd;
200456b2bdd1SGireesh Nagabhushana 		ddi_dma_handle_t hdl;
200556b2bdd1SGireesh Nagabhushana 
200656b2bdd1SGireesh Nagabhushana 		TXQ_LOCK(txq);
200756b2bdd1SGireesh Nagabhushana 		while (eq->cidx != eq->pidx) {
200856b2bdd1SGireesh Nagabhushana 			sd = &txq->sdesc[eq->cidx];
200956b2bdd1SGireesh Nagabhushana 
201056b2bdd1SGireesh Nagabhushana 			for (i = sd->hdls_used; i; i--) {
201156b2bdd1SGireesh Nagabhushana 				hdl = txq->tx_dhdl[txq->tx_dhdl_cidx];
201256b2bdd1SGireesh Nagabhushana 				(void) ddi_dma_unbind_handle(hdl);
201356b2bdd1SGireesh Nagabhushana 				if (++txq->tx_dhdl_cidx == txq->tx_dhdl_total)
201456b2bdd1SGireesh Nagabhushana 					txq->tx_dhdl_cidx = 0;
201556b2bdd1SGireesh Nagabhushana 			}
201656b2bdd1SGireesh Nagabhushana 
201756b2bdd1SGireesh Nagabhushana 			ASSERT(sd->m);
201856b2bdd1SGireesh Nagabhushana 			freemsgchain(sd->m);
201956b2bdd1SGireesh Nagabhushana 
202056b2bdd1SGireesh Nagabhushana 			eq->cidx += sd->desc_used;
202156b2bdd1SGireesh Nagabhushana 			if (eq->cidx >= eq->cap)
202256b2bdd1SGireesh Nagabhushana 				eq->cidx -= eq->cap;
202356b2bdd1SGireesh Nagabhushana 
202456b2bdd1SGireesh Nagabhushana 			txq->txb_avail += txq->txb_used;
202556b2bdd1SGireesh Nagabhushana 		}
202656b2bdd1SGireesh Nagabhushana 		ASSERT(txq->tx_dhdl_cidx == txq->tx_dhdl_pidx);
202756b2bdd1SGireesh Nagabhushana 		ASSERT(txq->txb_avail == txq->txb_size);
202856b2bdd1SGireesh Nagabhushana 		TXQ_UNLOCK(txq);
202956b2bdd1SGireesh Nagabhushana 
203056b2bdd1SGireesh Nagabhushana 		kmem_free(txq->sdesc, sizeof (struct tx_sdesc) * eq->cap);
203156b2bdd1SGireesh Nagabhushana 		txq->sdesc = NULL;
203256b2bdd1SGireesh Nagabhushana 	}
203356b2bdd1SGireesh Nagabhushana 
203456b2bdd1SGireesh Nagabhushana 	if (txq->tx_dhdl != NULL) {
203556b2bdd1SGireesh Nagabhushana 		for (i = 0; i < txq->tx_dhdl_total; i++) {
203656b2bdd1SGireesh Nagabhushana 			if (txq->tx_dhdl[i] != NULL)
203756b2bdd1SGireesh Nagabhushana 				ddi_dma_free_handle(&txq->tx_dhdl[i]);
203856b2bdd1SGireesh Nagabhushana 		}
203956b2bdd1SGireesh Nagabhushana 	}
204056b2bdd1SGireesh Nagabhushana 
204156b2bdd1SGireesh Nagabhushana 	(void) free_eq(sc, &txq->eq);
204256b2bdd1SGireesh Nagabhushana 
204356b2bdd1SGireesh Nagabhushana 	bzero(txq, sizeof (*txq));
204456b2bdd1SGireesh Nagabhushana 	return (0);
204556b2bdd1SGireesh Nagabhushana }
204656b2bdd1SGireesh Nagabhushana 
204756b2bdd1SGireesh Nagabhushana /*
204856b2bdd1SGireesh Nagabhushana  * Allocates a block of contiguous memory for DMA.  Can be used to allocate
204956b2bdd1SGireesh Nagabhushana  * memory for descriptor rings or for tx/rx copy buffers.
205056b2bdd1SGireesh Nagabhushana  *
205156b2bdd1SGireesh Nagabhushana  * Caller does not have to clean up anything if this function fails, it cleans
205256b2bdd1SGireesh Nagabhushana  * up after itself.
205356b2bdd1SGireesh Nagabhushana  *
205456b2bdd1SGireesh Nagabhushana  * Caller provides the following:
205556b2bdd1SGireesh Nagabhushana  * len		length of the block of memory to allocate.
205656b2bdd1SGireesh Nagabhushana  * flags	DDI_DMA_* flags to use (CONSISTENT/STREAMING, READ/WRITE/RDWR)
205756b2bdd1SGireesh Nagabhushana  * acc_attr	device access attributes for the allocation.
205856b2bdd1SGireesh Nagabhushana  * dma_attr	DMA attributes for the allocation
205956b2bdd1SGireesh Nagabhushana  *
206056b2bdd1SGireesh Nagabhushana  * If the function is successful it fills up this information:
206156b2bdd1SGireesh Nagabhushana  * dma_hdl	DMA handle for the allocated memory
206256b2bdd1SGireesh Nagabhushana  * acc_hdl	access handle for the allocated memory
206356b2bdd1SGireesh Nagabhushana  * ba		bus address of the allocated memory
206456b2bdd1SGireesh Nagabhushana  * va		KVA of the allocated memory.
206556b2bdd1SGireesh Nagabhushana  */
206656b2bdd1SGireesh Nagabhushana static int
206756b2bdd1SGireesh Nagabhushana alloc_dma_memory(struct adapter *sc, size_t len, int flags,
206856b2bdd1SGireesh Nagabhushana     ddi_device_acc_attr_t *acc_attr, ddi_dma_attr_t *dma_attr,
206956b2bdd1SGireesh Nagabhushana     ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
207056b2bdd1SGireesh Nagabhushana     uint64_t *pba, caddr_t *pva)
207156b2bdd1SGireesh Nagabhushana {
207256b2bdd1SGireesh Nagabhushana 	int rc;
207356b2bdd1SGireesh Nagabhushana 	ddi_dma_handle_t dhdl;
207456b2bdd1SGireesh Nagabhushana 	ddi_acc_handle_t ahdl;
207556b2bdd1SGireesh Nagabhushana 	ddi_dma_cookie_t cookie;
207656b2bdd1SGireesh Nagabhushana 	uint_t ccount;
207756b2bdd1SGireesh Nagabhushana 	caddr_t va;
207856b2bdd1SGireesh Nagabhushana 	size_t real_len;
207956b2bdd1SGireesh Nagabhushana 
208056b2bdd1SGireesh Nagabhushana 	*pva = NULL;
208156b2bdd1SGireesh Nagabhushana 
208256b2bdd1SGireesh Nagabhushana 	/*
208356b2bdd1SGireesh Nagabhushana 	 * DMA handle.
208456b2bdd1SGireesh Nagabhushana 	 */
208556b2bdd1SGireesh Nagabhushana 	rc = ddi_dma_alloc_handle(sc->dip, dma_attr, DDI_DMA_SLEEP, 0, &dhdl);
208656b2bdd1SGireesh Nagabhushana 	if (rc != DDI_SUCCESS) {
208756b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
208856b2bdd1SGireesh Nagabhushana 		    "failed to allocate DMA handle: %d", rc);
208956b2bdd1SGireesh Nagabhushana 
209056b2bdd1SGireesh Nagabhushana 		return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EINVAL);
209156b2bdd1SGireesh Nagabhushana 	}
209256b2bdd1SGireesh Nagabhushana 
209356b2bdd1SGireesh Nagabhushana 	/*
209456b2bdd1SGireesh Nagabhushana 	 * Memory suitable for DMA.
209556b2bdd1SGireesh Nagabhushana 	 */
209656b2bdd1SGireesh Nagabhushana 	rc = ddi_dma_mem_alloc(dhdl, len, acc_attr,
209756b2bdd1SGireesh Nagabhushana 	    flags & DDI_DMA_CONSISTENT ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
209856b2bdd1SGireesh Nagabhushana 	    DDI_DMA_SLEEP, 0, &va, &real_len, &ahdl);
209956b2bdd1SGireesh Nagabhushana 	if (rc != DDI_SUCCESS) {
210056b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
210156b2bdd1SGireesh Nagabhushana 		    "failed to allocate DMA memory: %d", rc);
210256b2bdd1SGireesh Nagabhushana 
210356b2bdd1SGireesh Nagabhushana 		ddi_dma_free_handle(&dhdl);
210456b2bdd1SGireesh Nagabhushana 		return (ENOMEM);
210556b2bdd1SGireesh Nagabhushana 	}
210656b2bdd1SGireesh Nagabhushana 
210756b2bdd1SGireesh Nagabhushana 	if (len != real_len) {
210856b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
210956b2bdd1SGireesh Nagabhushana 		    "%s: len (%u) != real_len (%u)", __func__,
		    (uint_t)len, (uint_t)real_len);
211056b2bdd1SGireesh Nagabhushana 	}
211156b2bdd1SGireesh Nagabhushana 
211256b2bdd1SGireesh Nagabhushana 	/*
211356b2bdd1SGireesh Nagabhushana 	 * DMA bindings.
211456b2bdd1SGireesh Nagabhushana 	 */
211556b2bdd1SGireesh Nagabhushana 	rc = ddi_dma_addr_bind_handle(dhdl, NULL, va, real_len, flags, NULL,
211656b2bdd1SGireesh Nagabhushana 	    NULL, &cookie, &ccount);
211756b2bdd1SGireesh Nagabhushana 	if (rc != DDI_DMA_MAPPED) {
211856b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
211956b2bdd1SGireesh Nagabhushana 		    "failed to map DMA memory: %d", rc);
212056b2bdd1SGireesh Nagabhushana 
212156b2bdd1SGireesh Nagabhushana 		ddi_dma_mem_free(&ahdl);
212256b2bdd1SGireesh Nagabhushana 		ddi_dma_free_handle(&dhdl);
212356b2bdd1SGireesh Nagabhushana 		return (ENOMEM);
212456b2bdd1SGireesh Nagabhushana 	}
212556b2bdd1SGireesh Nagabhushana 	if (ccount != 1) {
212656b2bdd1SGireesh Nagabhushana 		cxgb_printf(sc->dip, CE_WARN,
212756b2bdd1SGireesh Nagabhushana 		    "unusable DMA mapping (%d segments)", ccount);
212856b2bdd1SGireesh Nagabhushana 		(void) free_desc_ring(&dhdl, &ahdl);
		return (ENOMEM);
212956b2bdd1SGireesh Nagabhushana 	}
213056b2bdd1SGireesh Nagabhushana 
213156b2bdd1SGireesh Nagabhushana 	bzero(va, real_len);
213256b2bdd1SGireesh Nagabhushana 	*dma_hdl = dhdl;
213356b2bdd1SGireesh Nagabhushana 	*acc_hdl = ahdl;
213456b2bdd1SGireesh Nagabhushana 	*pba = cookie.dmac_laddress;
213556b2bdd1SGireesh Nagabhushana 	*pva = va;
213656b2bdd1SGireesh Nagabhushana 
213756b2bdd1SGireesh Nagabhushana 	return (0);
213856b2bdd1SGireesh Nagabhushana }
213956b2bdd1SGireesh Nagabhushana 
214056b2bdd1SGireesh Nagabhushana static int
214156b2bdd1SGireesh Nagabhushana free_dma_memory(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl)
214256b2bdd1SGireesh Nagabhushana {
214356b2bdd1SGireesh Nagabhushana 	(void) ddi_dma_unbind_handle(*dhdl);
214456b2bdd1SGireesh Nagabhushana 	ddi_dma_mem_free(ahdl);
214556b2bdd1SGireesh Nagabhushana 	ddi_dma_free_handle(dhdl);
214656b2bdd1SGireesh Nagabhushana 
214756b2bdd1SGireesh Nagabhushana 	return (0);
214856b2bdd1SGireesh Nagabhushana }
214956b2bdd1SGireesh Nagabhushana 
215056b2bdd1SGireesh Nagabhushana static int
215156b2bdd1SGireesh Nagabhushana alloc_desc_ring(struct adapter *sc, size_t len, int rw,
215256b2bdd1SGireesh Nagabhushana     ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
215356b2bdd1SGireesh Nagabhushana     uint64_t *pba, caddr_t *pva)
215456b2bdd1SGireesh Nagabhushana {
215556b2bdd1SGireesh Nagabhushana 	ddi_device_acc_attr_t *acc_attr = &sc->sge.acc_attr_desc;
215656b2bdd1SGireesh Nagabhushana 	ddi_dma_attr_t *dma_attr = &sc->sge.dma_attr_desc;
215756b2bdd1SGireesh Nagabhushana 
215856b2bdd1SGireesh Nagabhushana 	return (alloc_dma_memory(sc, len, DDI_DMA_CONSISTENT | rw, acc_attr,
215956b2bdd1SGireesh Nagabhushana 	    dma_attr, dma_hdl, acc_hdl, pba, pva));
216056b2bdd1SGireesh Nagabhushana }
216156b2bdd1SGireesh Nagabhushana 
216256b2bdd1SGireesh Nagabhushana static int
216356b2bdd1SGireesh Nagabhushana free_desc_ring(ddi_dma_handle_t *dhdl, ddi_acc_handle_t *ahdl)
216456b2bdd1SGireesh Nagabhushana {
216556b2bdd1SGireesh Nagabhushana 	return (free_dma_memory(dhdl, ahdl));
216656b2bdd1SGireesh Nagabhushana }
216756b2bdd1SGireesh Nagabhushana 
216856b2bdd1SGireesh Nagabhushana static int
216956b2bdd1SGireesh Nagabhushana alloc_tx_copybuffer(struct adapter *sc, size_t len,
217056b2bdd1SGireesh Nagabhushana     ddi_dma_handle_t *dma_hdl, ddi_acc_handle_t *acc_hdl,
217156b2bdd1SGireesh Nagabhushana     uint64_t *pba, caddr_t *pva)
217256b2bdd1SGireesh Nagabhushana {
217356b2bdd1SGireesh Nagabhushana 	ddi_device_acc_attr_t *acc_attr = &sc->sge.acc_attr_tx;
217456b2bdd1SGireesh Nagabhushana 	ddi_dma_attr_t *dma_attr = &sc->sge.dma_attr_desc; /* NOT dma_attr_tx */
217556b2bdd1SGireesh Nagabhushana 
217656b2bdd1SGireesh Nagabhushana 	return (alloc_dma_memory(sc, len, DDI_DMA_STREAMING | DDI_DMA_WRITE,
217756b2bdd1SGireesh Nagabhushana 	    acc_attr, dma_attr, dma_hdl, acc_hdl, pba, pva));
217856b2bdd1SGireesh Nagabhushana }
217956b2bdd1SGireesh Nagabhushana 
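/*
 * Response ring handshake: the SGE tags each response descriptor with a
 * generation bit that it flips every time it wraps around the ring.  An
 * entry is new only while its generation matches the one the driver
 * expects; iq_next() below toggles iq->gen on every software wraparound
 * so the two stay in step.
 */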
218056b2bdd1SGireesh Nagabhushana static inline bool
218156b2bdd1SGireesh Nagabhushana is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
218256b2bdd1SGireesh Nagabhushana {
218356b2bdd1SGireesh Nagabhushana 	(void) ddi_dma_sync(iq->dhdl, (uintptr_t)iq->cdesc -
218456b2bdd1SGireesh Nagabhushana 	    (uintptr_t)iq->desc, iq->esize, DDI_DMA_SYNC_FORKERNEL);
218556b2bdd1SGireesh Nagabhushana 
218656b2bdd1SGireesh Nagabhushana 	*ctrl = (void *)((uintptr_t)iq->cdesc +
218756b2bdd1SGireesh Nagabhushana 	    (iq->esize - sizeof (struct rsp_ctrl)));
218856b2bdd1SGireesh Nagabhushana 
218956b2bdd1SGireesh Nagabhushana 	return ((((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen));
219056b2bdd1SGireesh Nagabhushana }
219156b2bdd1SGireesh Nagabhushana 
219256b2bdd1SGireesh Nagabhushana static inline void
219356b2bdd1SGireesh Nagabhushana iq_next(struct sge_iq *iq)
219456b2bdd1SGireesh Nagabhushana {
219556b2bdd1SGireesh Nagabhushana 	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
219656b2bdd1SGireesh Nagabhushana 	if (++iq->cidx == iq->qsize - 1) {
219756b2bdd1SGireesh Nagabhushana 		iq->cidx = 0;
219856b2bdd1SGireesh Nagabhushana 		iq->gen ^= 1;
219956b2bdd1SGireesh Nagabhushana 		iq->cdesc = iq->desc;
220056b2bdd1SGireesh Nagabhushana 	}
220156b2bdd1SGireesh Nagabhushana }
220256b2bdd1SGireesh Nagabhushana 
220356b2bdd1SGireesh Nagabhushana /*
220456b2bdd1SGireesh Nagabhushana  * Fill up the freelist with up to nbufs buffers and maybe ring its doorbell.
220556b2bdd1SGireesh Nagabhushana  *
220656b2bdd1SGireesh Nagabhushana  * Returns non-zero to indicate that the freelist should be added to the list
220756b2bdd1SGireesh Nagabhushana  * of starving freelists.
220856b2bdd1SGireesh Nagabhushana  */
220956b2bdd1SGireesh Nagabhushana static int
221056b2bdd1SGireesh Nagabhushana refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
221156b2bdd1SGireesh Nagabhushana {
221256b2bdd1SGireesh Nagabhushana 	uint64_t *d = &fl->desc[fl->pidx];
221356b2bdd1SGireesh Nagabhushana 	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
221456b2bdd1SGireesh Nagabhushana 
221556b2bdd1SGireesh Nagabhushana 	FL_LOCK_ASSERT_OWNED(fl);
221656b2bdd1SGireesh Nagabhushana 	ASSERT(nbufs >= 0);
221756b2bdd1SGireesh Nagabhushana 
221856b2bdd1SGireesh Nagabhushana 	if (nbufs > fl->needed)
221956b2bdd1SGireesh Nagabhushana 		nbufs = fl->needed;
222056b2bdd1SGireesh Nagabhushana 
222156b2bdd1SGireesh Nagabhushana 	while (nbufs--) {
222256b2bdd1SGireesh Nagabhushana 		if (sd->rxb != NULL) {
222356b2bdd1SGireesh Nagabhushana 			if (sd->rxb->ref_cnt == 1) {
222456b2bdd1SGireesh Nagabhushana 				/*
222556b2bdd1SGireesh Nagabhushana 				 * Buffer is available for recycling.  Two ways
222656b2bdd1SGireesh Nagabhushana 				 * this can happen:
222756b2bdd1SGireesh Nagabhushana 				 *
222856b2bdd1SGireesh Nagabhushana 				 * a) All the packets DMA'd into it last time
222956b2bdd1SGireesh Nagabhushana 				 *    around were within the rx_copy_threshold
223056b2bdd1SGireesh Nagabhushana 				 *    and no part of the buffer was ever passed
223156b2bdd1SGireesh Nagabhushana 				 *    up (ref_cnt never went over 1).
223256b2bdd1SGireesh Nagabhushana 				 *
223356b2bdd1SGireesh Nagabhushana 				 * b) Packets DMA'd into the buffer were passed
223456b2bdd1SGireesh Nagabhushana 				 *    up but have all been freed by the upper
223556b2bdd1SGireesh Nagabhushana 				 *    layers by now (ref_cnt went over 1 but is
223656b2bdd1SGireesh Nagabhushana 				 *    now back to 1).
223756b2bdd1SGireesh Nagabhushana 				 *
223856b2bdd1SGireesh Nagabhushana 				 * Either way the bus address in the descriptor
223956b2bdd1SGireesh Nagabhushana 				 * ring is already valid.
224056b2bdd1SGireesh Nagabhushana 				 */
224156b2bdd1SGireesh Nagabhushana 				ASSERT(*d == cpu_to_be64(sd->rxb->ba));
224256b2bdd1SGireesh Nagabhushana 				d++;
224356b2bdd1SGireesh Nagabhushana 				goto recycled;
224456b2bdd1SGireesh Nagabhushana 			} else {
224556b2bdd1SGireesh Nagabhushana 				/*
224656b2bdd1SGireesh Nagabhushana 				 * Buffer still in use and we need a
224756b2bdd1SGireesh Nagabhushana 				 * replacement. But first release our reference
224856b2bdd1SGireesh Nagabhushana 				 * on the existing buffer.
224956b2bdd1SGireesh Nagabhushana 				 */
225056b2bdd1SGireesh Nagabhushana 				rxbuf_free(sd->rxb);
225156b2bdd1SGireesh Nagabhushana 			}
225256b2bdd1SGireesh Nagabhushana 		}
225356b2bdd1SGireesh Nagabhushana 
225456b2bdd1SGireesh Nagabhushana 		sd->rxb = rxbuf_alloc(sc->sge.rxbuf_cache, KM_NOSLEEP, 1);
225556b2bdd1SGireesh Nagabhushana 		if (sd->rxb == NULL)
225656b2bdd1SGireesh Nagabhushana 			break;
225756b2bdd1SGireesh Nagabhushana 		*d++ = cpu_to_be64(sd->rxb->ba);
225856b2bdd1SGireesh Nagabhushana 
225956b2bdd1SGireesh Nagabhushana recycled:	fl->pending++;
226056b2bdd1SGireesh Nagabhushana 		sd++;
226156b2bdd1SGireesh Nagabhushana 		fl->needed--;
226256b2bdd1SGireesh Nagabhushana 		if (++fl->pidx == fl->cap) {
226356b2bdd1SGireesh Nagabhushana 			fl->pidx = 0;
226456b2bdd1SGireesh Nagabhushana 			sd = fl->sdesc;
226556b2bdd1SGireesh Nagabhushana 			d = fl->desc;
226656b2bdd1SGireesh Nagabhushana 		}
226756b2bdd1SGireesh Nagabhushana 	}
226856b2bdd1SGireesh Nagabhushana 
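	/*
	 * Ring the doorbell only once at least 8 new buffers have been
	 * posted, so that doorbell MMIO writes are batched rather than
	 * issued per buffer.
	 */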
226956b2bdd1SGireesh Nagabhushana 	if (fl->pending >= 8)
227056b2bdd1SGireesh Nagabhushana 		ring_fl_db(sc, fl);
227156b2bdd1SGireesh Nagabhushana 
227256b2bdd1SGireesh Nagabhushana 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
227356b2bdd1SGireesh Nagabhushana }
227456b2bdd1SGireesh Nagabhushana 
227556b2bdd1SGireesh Nagabhushana #ifndef TAILQ_FOREACH_SAFE
227656b2bdd1SGireesh Nagabhushana #define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
227756b2bdd1SGireesh Nagabhushana 	for ((var) = TAILQ_FIRST((head));				\
227856b2bdd1SGireesh Nagabhushana 	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
227956b2bdd1SGireesh Nagabhushana 	    (var) = (tvar))
228056b2bdd1SGireesh Nagabhushana #endif
228156b2bdd1SGireesh Nagabhushana 
228256b2bdd1SGireesh Nagabhushana /*
228356b2bdd1SGireesh Nagabhushana  * Attempt to refill all starving freelists.
228456b2bdd1SGireesh Nagabhushana  */
228556b2bdd1SGireesh Nagabhushana static void
228656b2bdd1SGireesh Nagabhushana refill_sfl(void *arg)
228756b2bdd1SGireesh Nagabhushana {
228856b2bdd1SGireesh Nagabhushana 	struct adapter *sc = arg;
228956b2bdd1SGireesh Nagabhushana 	struct sge_fl *fl, *fl_temp;
229056b2bdd1SGireesh Nagabhushana 
229156b2bdd1SGireesh Nagabhushana 	mutex_enter(&sc->sfl_lock);
229256b2bdd1SGireesh Nagabhushana 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
229356b2bdd1SGireesh Nagabhushana 		FL_LOCK(fl);
229456b2bdd1SGireesh Nagabhushana 		(void) refill_fl(sc, fl, 64);
229556b2bdd1SGireesh Nagabhushana 		if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) {
229656b2bdd1SGireesh Nagabhushana 			TAILQ_REMOVE(&sc->sfl, fl, link);
229756b2bdd1SGireesh Nagabhushana 			fl->flags &= ~FL_STARVING;
229856b2bdd1SGireesh Nagabhushana 		}
229956b2bdd1SGireesh Nagabhushana 		FL_UNLOCK(fl);
230056b2bdd1SGireesh Nagabhushana 	}
230156b2bdd1SGireesh Nagabhushana 
230256b2bdd1SGireesh Nagabhushana 	if (!TAILQ_EMPTY(&sc->sfl))
230356b2bdd1SGireesh Nagabhushana 		sc->sfl_timer = timeout(refill_sfl, sc, drv_usectohz(100000));
230456b2bdd1SGireesh Nagabhushana 	mutex_exit(&sc->sfl_lock);
230556b2bdd1SGireesh Nagabhushana }
230656b2bdd1SGireesh Nagabhushana 
230756b2bdd1SGireesh Nagabhushana static void
230856b2bdd1SGireesh Nagabhushana add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
230956b2bdd1SGireesh Nagabhushana {
231056b2bdd1SGireesh Nagabhushana 	mutex_enter(&sc->sfl_lock);
231156b2bdd1SGireesh Nagabhushana 	FL_LOCK(fl);
231256b2bdd1SGireesh Nagabhushana 	if ((fl->flags & FL_DOOMED) == 0) {
231356b2bdd1SGireesh Nagabhushana 		if (TAILQ_EMPTY(&sc->sfl)) {
231456b2bdd1SGireesh Nagabhushana 			sc->sfl_timer = timeout(refill_sfl, sc,
231556b2bdd1SGireesh Nagabhushana 			    drv_usectohz(100000));
231656b2bdd1SGireesh Nagabhushana 		}
231756b2bdd1SGireesh Nagabhushana 		fl->flags |= FL_STARVING;
231856b2bdd1SGireesh Nagabhushana 		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
231956b2bdd1SGireesh Nagabhushana 	}
232056b2bdd1SGireesh Nagabhushana 	FL_UNLOCK(fl);
232156b2bdd1SGireesh Nagabhushana 	mutex_exit(&sc->sfl_lock);
232256b2bdd1SGireesh Nagabhushana }
232356b2bdd1SGireesh Nagabhushana 
232456b2bdd1SGireesh Nagabhushana static void
232556b2bdd1SGireesh Nagabhushana free_fl_bufs(struct sge_fl *fl)
232656b2bdd1SGireesh Nagabhushana {
232756b2bdd1SGireesh Nagabhushana 	struct fl_sdesc *sd;
232856b2bdd1SGireesh Nagabhushana 	unsigned int i;
232956b2bdd1SGireesh Nagabhushana 
233056b2bdd1SGireesh Nagabhushana 	FL_LOCK_ASSERT_OWNED(fl);
233156b2bdd1SGireesh Nagabhushana 
233256b2bdd1SGireesh Nagabhushana 	for (i = 0; i < fl->cap; i++) {
233356b2bdd1SGireesh Nagabhushana 		sd = &fl->sdesc[i];
233456b2bdd1SGireesh Nagabhushana 
233556b2bdd1SGireesh Nagabhushana 		if (sd->rxb != NULL) {
233656b2bdd1SGireesh Nagabhushana 			rxbuf_free(sd->rxb);
233756b2bdd1SGireesh Nagabhushana 			sd->rxb = NULL;
233856b2bdd1SGireesh Nagabhushana 		}
233956b2bdd1SGireesh Nagabhushana 	}
234056b2bdd1SGireesh Nagabhushana }
234156b2bdd1SGireesh Nagabhushana 
234256b2bdd1SGireesh Nagabhushana /*
234356b2bdd1SGireesh Nagabhushana  * Note that fl->cidx and fl->offset are left unchanged in case of failure.
234456b2bdd1SGireesh Nagabhushana  */
234556b2bdd1SGireesh Nagabhushana static mblk_t *
23463dde7c95SVishal Kulkarni get_fl_payload(struct adapter *sc, struct sge_fl *fl,
23473dde7c95SVishal Kulkarni     uint32_t len_newbuf, int *fl_bufs_used)
234856b2bdd1SGireesh Nagabhushana {
234956b2bdd1SGireesh Nagabhushana 	struct mblk_pair frame = {0};
235056b2bdd1SGireesh Nagabhushana 	struct rxbuf *rxb;
235156b2bdd1SGireesh Nagabhushana 	mblk_t *m = NULL;
235256b2bdd1SGireesh Nagabhushana 	uint_t nbuf = 0, len, copy, n;
2353bbb9d5d6SJohn Levon 	uint32_t cidx, offset, rcidx, roffset;
235456b2bdd1SGireesh Nagabhushana 
235556b2bdd1SGireesh Nagabhushana 	/*
235656b2bdd1SGireesh Nagabhushana 	 * The SGE won't pack a new frame into the current buffer if the entire
235756b2bdd1SGireesh Nagabhushana 	 * payload doesn't fit in the remaining space.  Move on to the next buf
235856b2bdd1SGireesh Nagabhushana 	 * in that case.
235956b2bdd1SGireesh Nagabhushana 	 */
2360bbb9d5d6SJohn Levon 	rcidx = fl->cidx;
2361bbb9d5d6SJohn Levon 	roffset = fl->offset;
236256b2bdd1SGireesh Nagabhushana 	if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
236356b2bdd1SGireesh Nagabhushana 		fl->offset = 0;
236456b2bdd1SGireesh Nagabhushana 		if (++fl->cidx == fl->cap)
236556b2bdd1SGireesh Nagabhushana 			fl->cidx = 0;
236656b2bdd1SGireesh Nagabhushana 		nbuf++;
236756b2bdd1SGireesh Nagabhushana 	}
236856b2bdd1SGireesh Nagabhushana 	cidx = fl->cidx;
236956b2bdd1SGireesh Nagabhushana 	offset = fl->offset;
237056b2bdd1SGireesh Nagabhushana 
237156b2bdd1SGireesh Nagabhushana 	len = G_RSPD_LEN(len_newbuf);	/* pktshift + payload length */
237256b2bdd1SGireesh Nagabhushana 	copy = (len <= fl->copy_threshold);
237356b2bdd1SGireesh Nagabhushana 	if (copy != 0) {
237456b2bdd1SGireesh Nagabhushana 		frame.head = m = allocb(len, BPRI_HI);
2375bbb9d5d6SJohn Levon 		if (m == NULL) {
2376bbb9d5d6SJohn Levon 			fl->allocb_fail++;
2377bbb9d5d6SJohn Levon 			cmn_err(CE_WARN, "%s: mblk allocation failure "
2378bbb9d5d6SJohn Levon 			    "count = %llu", __func__,
2379bbb9d5d6SJohn Levon 			    (unsigned long long)fl->allocb_fail);
2380bbb9d5d6SJohn Levon 			fl->cidx = rcidx;
2381bbb9d5d6SJohn Levon 			fl->offset = roffset;
238256b2bdd1SGireesh Nagabhushana 			return (NULL);
2383bbb9d5d6SJohn Levon 		}
238456b2bdd1SGireesh Nagabhushana 	}
238556b2bdd1SGireesh Nagabhushana 
238656b2bdd1SGireesh Nagabhushana 	while (len) {
238756b2bdd1SGireesh Nagabhushana 		rxb = fl->sdesc[cidx].rxb;
238856b2bdd1SGireesh Nagabhushana 		n = min(len, rxb->buf_size - offset);
238956b2bdd1SGireesh Nagabhushana 
239056b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(rxb->dhdl, offset, n,
239156b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SYNC_FORKERNEL);
239256b2bdd1SGireesh Nagabhushana 
239356b2bdd1SGireesh Nagabhushana 		if (copy != 0)
239456b2bdd1SGireesh Nagabhushana 			bcopy(rxb->va + offset, m->b_wptr, n);
239556b2bdd1SGireesh Nagabhushana 		else {
239656b2bdd1SGireesh Nagabhushana 			m = desballoc((unsigned char *)rxb->va + offset, n,
239756b2bdd1SGireesh Nagabhushana 			    BPRI_HI, &rxb->freefunc);
239856b2bdd1SGireesh Nagabhushana 			if (m == NULL) {
2399bbb9d5d6SJohn Levon 				fl->allocb_fail++;
2400bbb9d5d6SJohn Levon 				cmn_err(CE_WARN,
2401bbb9d5d6SJohn Levon 				    "%s: mblk allocation failure "
2402bbb9d5d6SJohn Levon 				    "count = %llu", __func__,
2403bbb9d5d6SJohn Levon 				    (unsigned long long)fl->allocb_fail);
2404bbb9d5d6SJohn Levon 				if (frame.head)
2405bbb9d5d6SJohn Levon 					freemsgchain(frame.head);
2406bbb9d5d6SJohn Levon 				fl->cidx = rcidx;
2407bbb9d5d6SJohn Levon 				fl->offset = roffset;
240856b2bdd1SGireesh Nagabhushana 				return (NULL);
240956b2bdd1SGireesh Nagabhushana 			}
241056b2bdd1SGireesh Nagabhushana 			atomic_inc_uint(&rxb->ref_cnt);
241156b2bdd1SGireesh Nagabhushana 			if (frame.head != NULL)
241256b2bdd1SGireesh Nagabhushana 				frame.tail->b_cont = m;
241356b2bdd1SGireesh Nagabhushana 			else
241456b2bdd1SGireesh Nagabhushana 				frame.head = m;
241556b2bdd1SGireesh Nagabhushana 			frame.tail = m;
241656b2bdd1SGireesh Nagabhushana 		}
241756b2bdd1SGireesh Nagabhushana 		m->b_wptr += n;
241856b2bdd1SGireesh Nagabhushana 		len -= n;
24193dde7c95SVishal Kulkarni 		offset += roundup(n, sc->sge.fl_align);
242056b2bdd1SGireesh Nagabhushana 		ASSERT(offset <= rxb->buf_size);
242156b2bdd1SGireesh Nagabhushana 		if (offset == rxb->buf_size) {
242256b2bdd1SGireesh Nagabhushana 			offset = 0;
242356b2bdd1SGireesh Nagabhushana 			if (++cidx == fl->cap)
242456b2bdd1SGireesh Nagabhushana 				cidx = 0;
242556b2bdd1SGireesh Nagabhushana 			nbuf++;
242656b2bdd1SGireesh Nagabhushana 		}
242756b2bdd1SGireesh Nagabhushana 	}
242856b2bdd1SGireesh Nagabhushana 
242956b2bdd1SGireesh Nagabhushana 	fl->cidx = cidx;
243056b2bdd1SGireesh Nagabhushana 	fl->offset = offset;
243156b2bdd1SGireesh Nagabhushana 	(*fl_bufs_used) += nbuf;
243256b2bdd1SGireesh Nagabhushana 
243356b2bdd1SGireesh Nagabhushana 	ASSERT(frame.head != NULL);
243456b2bdd1SGireesh Nagabhushana 	return (frame.head);
243556b2bdd1SGireesh Nagabhushana }
243656b2bdd1SGireesh Nagabhushana 
243756b2bdd1SGireesh Nagabhushana /*
243856b2bdd1SGireesh Nagabhushana  * We'll do immediate data tx for non-LSO, but only when not coalescing.  We're
243956b2bdd1SGireesh Nagabhushana  * willing to use up to 2 hardware descriptors, which means a maximum of 96
 * bytes
244056b2bdd1SGireesh Nagabhushana  * of immediate data.
244156b2bdd1SGireesh Nagabhushana  */
244256b2bdd1SGireesh Nagabhushana #define	IMM_LEN ( \
244356b2bdd1SGireesh Nagabhushana 	2 * EQ_ESIZE \
244456b2bdd1SGireesh Nagabhushana 	- sizeof (struct fw_eth_tx_pkt_wr) \
244556b2bdd1SGireesh Nagabhushana 	- sizeof (struct cpl_tx_pkt_core))
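/*
 * A worked example of the arithmetic above (a sketch -- the exact sizes
 * come from the shared firmware headers): with 64-byte hardware
 * descriptors (EQ_ESIZE) and 16-byte fw_eth_tx_pkt_wr and
 * cpl_tx_pkt_core headers, IMM_LEN = 2 * 64 - 16 - 16 = 96 bytes.
 */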
244656b2bdd1SGireesh Nagabhushana 
244756b2bdd1SGireesh Nagabhushana /*
244856b2bdd1SGireesh Nagabhushana  * Returns non-zero on failure, no need to cleanup anything in that case.
244956b2bdd1SGireesh Nagabhushana  *
245056b2bdd1SGireesh Nagabhushana  * Note 1: We always try to pull up the mblk if required and return E2BIG only
245156b2bdd1SGireesh Nagabhushana  * if this fails.
245256b2bdd1SGireesh Nagabhushana  *
245356b2bdd1SGireesh Nagabhushana  * Note 2: We'll also pullup incoming mblk if HW_LSO is set and the first mblk
245456b2bdd1SGireesh Nagabhushana  * does not have the TCP header in it.
245556b2bdd1SGireesh Nagabhushana  */
245656b2bdd1SGireesh Nagabhushana static int
245756b2bdd1SGireesh Nagabhushana get_frame_txinfo(struct sge_txq *txq, mblk_t **fp, struct txinfo *txinfo,
245856b2bdd1SGireesh Nagabhushana     int sgl_only)
245956b2bdd1SGireesh Nagabhushana {
246056b2bdd1SGireesh Nagabhushana 	uint32_t flags = 0, len, n;
246156b2bdd1SGireesh Nagabhushana 	mblk_t *m = *fp;
246256b2bdd1SGireesh Nagabhushana 	int rc;
246356b2bdd1SGireesh Nagabhushana 
246456b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* will manipulate txb and dma_hdls */
246556b2bdd1SGireesh Nagabhushana 
246656b2bdd1SGireesh Nagabhushana 	mac_hcksum_get(m, NULL, NULL, NULL, NULL, &flags);
246756b2bdd1SGireesh Nagabhushana 	txinfo->flags = flags;
246856b2bdd1SGireesh Nagabhushana 
246956b2bdd1SGireesh Nagabhushana 	mac_lso_get(m, &txinfo->mss, &flags);
247056b2bdd1SGireesh Nagabhushana 	txinfo->flags |= flags;
247156b2bdd1SGireesh Nagabhushana 
247256b2bdd1SGireesh Nagabhushana 	if (flags & HW_LSO)
247356b2bdd1SGireesh Nagabhushana 		sgl_only = 1;	/* Do not allow immediate data with LSO */
247456b2bdd1SGireesh Nagabhushana 
247556b2bdd1SGireesh Nagabhushana start:	txinfo->nsegs = 0;
247656b2bdd1SGireesh Nagabhushana 	txinfo->hdls_used = 0;
247756b2bdd1SGireesh Nagabhushana 	txinfo->txb_used = 0;
247856b2bdd1SGireesh Nagabhushana 	txinfo->len = 0;
247956b2bdd1SGireesh Nagabhushana 
248056b2bdd1SGireesh Nagabhushana 	/* total length and a rough estimate of # of segments */
248156b2bdd1SGireesh Nagabhushana 	n = 0;
248256b2bdd1SGireesh Nagabhushana 	for (; m; m = m->b_cont) {
248356b2bdd1SGireesh Nagabhushana 		len = MBLKL(m);
248456b2bdd1SGireesh Nagabhushana 		n += (len / PAGE_SIZE) + 1;
248556b2bdd1SGireesh Nagabhushana 		txinfo->len += len;
248656b2bdd1SGireesh Nagabhushana 	}
248756b2bdd1SGireesh Nagabhushana 	m = *fp;
248856b2bdd1SGireesh Nagabhushana 
248956b2bdd1SGireesh Nagabhushana 	if (n >= TX_SGL_SEGS || ((flags & HW_LSO) && MBLKL(m) < 50)) {
249056b2bdd1SGireesh Nagabhushana 		txq->pullup_early++;
249156b2bdd1SGireesh Nagabhushana 		m = msgpullup(*fp, -1);
249256b2bdd1SGireesh Nagabhushana 		if (m == NULL) {
249356b2bdd1SGireesh Nagabhushana 			txq->pullup_failed++;
249456b2bdd1SGireesh Nagabhushana 			return (E2BIG);	/* (*fp) left as it was */
249556b2bdd1SGireesh Nagabhushana 		}
249656b2bdd1SGireesh Nagabhushana 		freemsg(*fp);
249756b2bdd1SGireesh Nagabhushana 		*fp = m;
24984c028d0bSToomas Soome 		mac_hcksum_set(m, 0, 0, 0, 0, txinfo->flags);
249956b2bdd1SGireesh Nagabhushana 	}
250056b2bdd1SGireesh Nagabhushana 
250156b2bdd1SGireesh Nagabhushana 	if (txinfo->len <= IMM_LEN && !sgl_only)
250256b2bdd1SGireesh Nagabhushana 		return (0);	/* nsegs = 0 tells caller to use imm. tx */
250356b2bdd1SGireesh Nagabhushana 
250456b2bdd1SGireesh Nagabhushana 	if (txinfo->len <= txq->copy_threshold &&
250556b2bdd1SGireesh Nagabhushana 	    copy_into_txb(txq, m, txinfo->len, txinfo) == 0)
250656b2bdd1SGireesh Nagabhushana 		goto done;
250756b2bdd1SGireesh Nagabhushana 
250856b2bdd1SGireesh Nagabhushana 	for (; m; m = m->b_cont) {
250956b2bdd1SGireesh Nagabhushana 
251056b2bdd1SGireesh Nagabhushana 		len = MBLKL(m);
251156b2bdd1SGireesh Nagabhushana 
251256b2bdd1SGireesh Nagabhushana 		/* Use tx copy buffer if this mblk is small enough */
251356b2bdd1SGireesh Nagabhushana 		if (len <= txq->copy_threshold &&
251456b2bdd1SGireesh Nagabhushana 		    copy_into_txb(txq, m, len, txinfo) == 0)
251556b2bdd1SGireesh Nagabhushana 			continue;
251656b2bdd1SGireesh Nagabhushana 
251756b2bdd1SGireesh Nagabhushana 		/* Add DMA bindings for this mblk to the SGL */
251856b2bdd1SGireesh Nagabhushana 		rc = add_mblk(txq, txinfo, m, len);
251956b2bdd1SGireesh Nagabhushana 
252056b2bdd1SGireesh Nagabhushana 		if (rc == E2BIG ||
252156b2bdd1SGireesh Nagabhushana 		    (txinfo->nsegs == TX_SGL_SEGS && m->b_cont)) {
252256b2bdd1SGireesh Nagabhushana 
252356b2bdd1SGireesh Nagabhushana 			txq->pullup_late++;
252456b2bdd1SGireesh Nagabhushana 			m = msgpullup(*fp, -1);
252556b2bdd1SGireesh Nagabhushana 			if (m != NULL) {
252656b2bdd1SGireesh Nagabhushana 				free_txinfo_resources(txq, txinfo);
252756b2bdd1SGireesh Nagabhushana 				freemsg(*fp);
252856b2bdd1SGireesh Nagabhushana 				*fp = m;
25294c028d0bSToomas Soome 				mac_hcksum_set(m, 0, 0, 0, 0, txinfo->flags);
253056b2bdd1SGireesh Nagabhushana 				goto start;
253156b2bdd1SGireesh Nagabhushana 			}
253256b2bdd1SGireesh Nagabhushana 
253356b2bdd1SGireesh Nagabhushana 			txq->pullup_failed++;
253456b2bdd1SGireesh Nagabhushana 			rc = E2BIG;
253556b2bdd1SGireesh Nagabhushana 		}
253656b2bdd1SGireesh Nagabhushana 
253756b2bdd1SGireesh Nagabhushana 		if (rc != 0) {
253856b2bdd1SGireesh Nagabhushana 			free_txinfo_resources(txq, txinfo);
253956b2bdd1SGireesh Nagabhushana 			return (rc);
254056b2bdd1SGireesh Nagabhushana 		}
254156b2bdd1SGireesh Nagabhushana 	}
254256b2bdd1SGireesh Nagabhushana 
254356b2bdd1SGireesh Nagabhushana 	ASSERT(txinfo->nsegs > 0 && txinfo->nsegs <= TX_SGL_SEGS);
254456b2bdd1SGireesh Nagabhushana 
254556b2bdd1SGireesh Nagabhushana done:
254656b2bdd1SGireesh Nagabhushana 
254756b2bdd1SGireesh Nagabhushana 	/*
254856b2bdd1SGireesh Nagabhushana 	 * Store the # of flits required to hold this frame's SGL in nflits.  An
254956b2bdd1SGireesh Nagabhushana 	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
255056b2bdd1SGireesh Nagabhushana 	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
255156b2bdd1SGireesh Nagabhushana 	 * then len1 must be set to 0.
255256b2bdd1SGireesh Nagabhushana 	 */
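	/*
	 * Example: nsegs = 3 gives n = 2 and nflits = (3 * 2) / 2 + 0 + 2 = 5;
	 * one flit for the ULPTX header + len0, one for addr0, one for the
	 * (len1, len2) pair, and two more for addr1 and addr2.
	 */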
255356b2bdd1SGireesh Nagabhushana 	n = txinfo->nsegs - 1;
255456b2bdd1SGireesh Nagabhushana 	txinfo->nflits = (3 * n) / 2 + (n & 1) + 2;
255556b2bdd1SGireesh Nagabhushana 	if (n & 1)
255656b2bdd1SGireesh Nagabhushana 		txinfo->sgl.sge[n / 2].len[1] = cpu_to_be32(0);
255756b2bdd1SGireesh Nagabhushana 
255856b2bdd1SGireesh Nagabhushana 	txinfo->sgl.cmd_nsge = cpu_to_be32(V_ULPTX_CMD((u32)ULP_TX_SC_DSGL) |
255956b2bdd1SGireesh Nagabhushana 	    V_ULPTX_NSGE(txinfo->nsegs));
256056b2bdd1SGireesh Nagabhushana 
256156b2bdd1SGireesh Nagabhushana 	return (0);
256256b2bdd1SGireesh Nagabhushana }
256356b2bdd1SGireesh Nagabhushana 
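/*
 * Can len bytes be placed in the tx copy buffer as one contiguous run?
 * The buffer is circular: if the space left at its tail is too small,
 * that tail is skipped (reported via *waste) and the run would start at
 * offset 0 instead, provided enough total space remains.
 */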
256456b2bdd1SGireesh Nagabhushana static inline int
256556b2bdd1SGireesh Nagabhushana fits_in_txb(struct sge_txq *txq, int len, int *waste)
256656b2bdd1SGireesh Nagabhushana {
256756b2bdd1SGireesh Nagabhushana 	if (txq->txb_avail < len)
256856b2bdd1SGireesh Nagabhushana 		return (0);
256956b2bdd1SGireesh Nagabhushana 
257056b2bdd1SGireesh Nagabhushana 	if (txq->txb_next + len <= txq->txb_size) {
257156b2bdd1SGireesh Nagabhushana 		*waste = 0;
257256b2bdd1SGireesh Nagabhushana 		return (1);
257356b2bdd1SGireesh Nagabhushana 	}
257456b2bdd1SGireesh Nagabhushana 
257556b2bdd1SGireesh Nagabhushana 	*waste = txq->txb_size - txq->txb_next;
257656b2bdd1SGireesh Nagabhushana 
257756b2bdd1SGireesh Nagabhushana 	return (txq->txb_avail - *waste < len ? 0 : 1);
257856b2bdd1SGireesh Nagabhushana }
257956b2bdd1SGireesh Nagabhushana 
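/*
 * Copy-buffer space is consumed in multiples of TXB_CHUNK, which keeps
 * txb_next chunk-aligned and makes any skipped tail ("waste") an
 * integral number of chunks -- see the ASSERT in copy_into_txb().
 */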
258056b2bdd1SGireesh Nagabhushana #define	TXB_CHUNK	64
258156b2bdd1SGireesh Nagabhushana 
258256b2bdd1SGireesh Nagabhushana /*
258356b2bdd1SGireesh Nagabhushana  * Copies the specified # of bytes into txq's tx copy buffer and updates txinfo
258456b2bdd1SGireesh Nagabhushana  * and txq to indicate resources used.  Caller has to make sure that those many
258556b2bdd1SGireesh Nagabhushana  * bytes are available in the mblk chain (b_cont linked).
258656b2bdd1SGireesh Nagabhushana  */
258756b2bdd1SGireesh Nagabhushana static inline int
258856b2bdd1SGireesh Nagabhushana copy_into_txb(struct sge_txq *txq, mblk_t *m, int len, struct txinfo *txinfo)
258956b2bdd1SGireesh Nagabhushana {
259056b2bdd1SGireesh Nagabhushana 	int waste, n;
259156b2bdd1SGireesh Nagabhushana 
259256b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* will manipulate txb */
259356b2bdd1SGireesh Nagabhushana 
259456b2bdd1SGireesh Nagabhushana 	if (!fits_in_txb(txq, len, &waste)) {
259556b2bdd1SGireesh Nagabhushana 		txq->txb_full++;
259656b2bdd1SGireesh Nagabhushana 		return (ENOMEM);
259756b2bdd1SGireesh Nagabhushana 	}
259856b2bdd1SGireesh Nagabhushana 
259956b2bdd1SGireesh Nagabhushana 	if (waste != 0) {
260056b2bdd1SGireesh Nagabhushana 		ASSERT((waste & (TXB_CHUNK - 1)) == 0);
260156b2bdd1SGireesh Nagabhushana 		txinfo->txb_used += waste;
260256b2bdd1SGireesh Nagabhushana 		txq->txb_avail -= waste;
260356b2bdd1SGireesh Nagabhushana 		txq->txb_next = 0;
260456b2bdd1SGireesh Nagabhushana 	}
260556b2bdd1SGireesh Nagabhushana 
260656b2bdd1SGireesh Nagabhushana 	for (n = 0; n < len; m = m->b_cont) {
260756b2bdd1SGireesh Nagabhushana 		bcopy(m->b_rptr, txq->txb_va + txq->txb_next + n, MBLKL(m));
260856b2bdd1SGireesh Nagabhushana 		n += MBLKL(m);
260956b2bdd1SGireesh Nagabhushana 	}
261056b2bdd1SGireesh Nagabhushana 
261156b2bdd1SGireesh Nagabhushana 	add_seg(txinfo, txq->txb_ba + txq->txb_next, len);
261256b2bdd1SGireesh Nagabhushana 
261356b2bdd1SGireesh Nagabhushana 	n = roundup(len, TXB_CHUNK);
261456b2bdd1SGireesh Nagabhushana 	txinfo->txb_used += n;
261556b2bdd1SGireesh Nagabhushana 	txq->txb_avail -= n;
261656b2bdd1SGireesh Nagabhushana 	txq->txb_next += n;
261756b2bdd1SGireesh Nagabhushana 	ASSERT(txq->txb_next <= txq->txb_size);
261856b2bdd1SGireesh Nagabhushana 	if (txq->txb_next == txq->txb_size)
261956b2bdd1SGireesh Nagabhushana 		txq->txb_next = 0;
262056b2bdd1SGireesh Nagabhushana 
262156b2bdd1SGireesh Nagabhushana 	return (0);
262256b2bdd1SGireesh Nagabhushana }
262356b2bdd1SGireesh Nagabhushana 
262456b2bdd1SGireesh Nagabhushana static inline void
262556b2bdd1SGireesh Nagabhushana add_seg(struct txinfo *txinfo, uint64_t ba, uint32_t len)
262656b2bdd1SGireesh Nagabhushana {
262756b2bdd1SGireesh Nagabhushana 	ASSERT(txinfo->nsegs < TX_SGL_SEGS);	/* must have room */
262856b2bdd1SGireesh Nagabhushana 
262956b2bdd1SGireesh Nagabhushana 	if (txinfo->nsegs != 0) {
263056b2bdd1SGireesh Nagabhushana 		int idx = txinfo->nsegs - 1;
263156b2bdd1SGireesh Nagabhushana 		txinfo->sgl.sge[idx / 2].len[idx & 1] = cpu_to_be32(len);
263256b2bdd1SGireesh Nagabhushana 		txinfo->sgl.sge[idx / 2].addr[idx & 1] = cpu_to_be64(ba);
263356b2bdd1SGireesh Nagabhushana 	} else {
263456b2bdd1SGireesh Nagabhushana 		txinfo->sgl.len0 = cpu_to_be32(len);
263556b2bdd1SGireesh Nagabhushana 		txinfo->sgl.addr0 = cpu_to_be64(ba);
263656b2bdd1SGireesh Nagabhushana 	}
263756b2bdd1SGireesh Nagabhushana 	txinfo->nsegs++;
263856b2bdd1SGireesh Nagabhushana }
263956b2bdd1SGireesh Nagabhushana 
264056b2bdd1SGireesh Nagabhushana /*
264156b2bdd1SGireesh Nagabhushana  * This function cleans up any partially allocated resources when it fails so
264256b2bdd1SGireesh Nagabhushana  * there's nothing for the caller to clean up in that case.
264356b2bdd1SGireesh Nagabhushana  *
264456b2bdd1SGireesh Nagabhushana  * EIO indicates permanent failure.  Caller should drop the frame containing
264556b2bdd1SGireesh Nagabhushana  * this mblk and continue.
264656b2bdd1SGireesh Nagabhushana  *
264756b2bdd1SGireesh Nagabhushana  * E2BIG indicates that the SGL length for this mblk exceeds the hardware
264856b2bdd1SGireesh Nagabhushana  * limit.  Caller should pull up the frame before trying to send it out.
264956b2bdd1SGireesh Nagabhushana  * (This error means our pullup_early heuristic did not work for this frame)
265056b2bdd1SGireesh Nagabhushana  *
265156b2bdd1SGireesh Nagabhushana  * ENOMEM indicates a temporary shortage of resources (DMA handles, other DMA
265256b2bdd1SGireesh Nagabhushana  * resources, etc.).  Caller should suspend the tx queue and wait for reclaim to
265356b2bdd1SGireesh Nagabhushana  * free up resources.
265456b2bdd1SGireesh Nagabhushana  */
265556b2bdd1SGireesh Nagabhushana static inline int
265656b2bdd1SGireesh Nagabhushana add_mblk(struct sge_txq *txq, struct txinfo *txinfo, mblk_t *m, int len)
265756b2bdd1SGireesh Nagabhushana {
265856b2bdd1SGireesh Nagabhushana 	ddi_dma_handle_t dhdl;
265956b2bdd1SGireesh Nagabhushana 	ddi_dma_cookie_t cookie;
266056b2bdd1SGireesh Nagabhushana 	uint_t ccount = 0;
266156b2bdd1SGireesh Nagabhushana 	int rc;
266256b2bdd1SGireesh Nagabhushana 
266356b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* will manipulate dhdls */
266456b2bdd1SGireesh Nagabhushana 
266556b2bdd1SGireesh Nagabhushana 	if (txq->tx_dhdl_avail == 0) {
266656b2bdd1SGireesh Nagabhushana 		txq->dma_hdl_failed++;
266756b2bdd1SGireesh Nagabhushana 		return (ENOMEM);
266856b2bdd1SGireesh Nagabhushana 	}
266956b2bdd1SGireesh Nagabhushana 
267056b2bdd1SGireesh Nagabhushana 	dhdl = txq->tx_dhdl[txq->tx_dhdl_pidx];
267156b2bdd1SGireesh Nagabhushana 	rc = ddi_dma_addr_bind_handle(dhdl, NULL, (caddr_t)m->b_rptr, len,
267256b2bdd1SGireesh Nagabhushana 	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &cookie,
267356b2bdd1SGireesh Nagabhushana 	    &ccount);
267456b2bdd1SGireesh Nagabhushana 	if (rc != DDI_DMA_MAPPED) {
267556b2bdd1SGireesh Nagabhushana 		txq->dma_map_failed++;
267656b2bdd1SGireesh Nagabhushana 
267756b2bdd1SGireesh Nagabhushana 		ASSERT(rc != DDI_DMA_INUSE && rc != DDI_DMA_PARTIAL_MAP);
267856b2bdd1SGireesh Nagabhushana 
267956b2bdd1SGireesh Nagabhushana 		return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EIO);
268056b2bdd1SGireesh Nagabhushana 	}
268156b2bdd1SGireesh Nagabhushana 
268256b2bdd1SGireesh Nagabhushana 	if (ccount + txinfo->nsegs > TX_SGL_SEGS) {
268356b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_unbind_handle(dhdl);
268456b2bdd1SGireesh Nagabhushana 		return (E2BIG);
268556b2bdd1SGireesh Nagabhushana 	}
268656b2bdd1SGireesh Nagabhushana 
268756b2bdd1SGireesh Nagabhushana 	add_seg(txinfo, cookie.dmac_laddress, cookie.dmac_size);
268856b2bdd1SGireesh Nagabhushana 	while (--ccount) {
268956b2bdd1SGireesh Nagabhushana 		ddi_dma_nextcookie(dhdl, &cookie);
269056b2bdd1SGireesh Nagabhushana 		add_seg(txinfo, cookie.dmac_laddress, cookie.dmac_size);
269156b2bdd1SGireesh Nagabhushana 	}
269256b2bdd1SGireesh Nagabhushana 
269356b2bdd1SGireesh Nagabhushana 	if (++txq->tx_dhdl_pidx == txq->tx_dhdl_total)
269456b2bdd1SGireesh Nagabhushana 		txq->tx_dhdl_pidx = 0;
269556b2bdd1SGireesh Nagabhushana 	txq->tx_dhdl_avail--;
269656b2bdd1SGireesh Nagabhushana 	txinfo->hdls_used++;
269756b2bdd1SGireesh Nagabhushana 
269856b2bdd1SGireesh Nagabhushana 	return (0);
269956b2bdd1SGireesh Nagabhushana }
270056b2bdd1SGireesh Nagabhushana 
270156b2bdd1SGireesh Nagabhushana /*
270256b2bdd1SGireesh Nagabhushana  * Releases all the txq resources used up in the specified txinfo.
270356b2bdd1SGireesh Nagabhushana  */
270456b2bdd1SGireesh Nagabhushana static void
270556b2bdd1SGireesh Nagabhushana free_txinfo_resources(struct sge_txq *txq, struct txinfo *txinfo)
270656b2bdd1SGireesh Nagabhushana {
270756b2bdd1SGireesh Nagabhushana 	int n;
270856b2bdd1SGireesh Nagabhushana 
270956b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* dhdls, txb */
271056b2bdd1SGireesh Nagabhushana 
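	/*
	 * Rewind the copy buffer by the bytes this frame consumed, wrapping
	 * backwards past offset 0 if the frame's bytes straddled the end of
	 * the circular buffer.
	 */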
271156b2bdd1SGireesh Nagabhushana 	n = txinfo->txb_used;
271256b2bdd1SGireesh Nagabhushana 	if (n > 0) {
271356b2bdd1SGireesh Nagabhushana 		txq->txb_avail += n;
271456b2bdd1SGireesh Nagabhushana 		if (n <= txq->txb_next)
271556b2bdd1SGireesh Nagabhushana 			txq->txb_next -= n;
271656b2bdd1SGireesh Nagabhushana 		else {
271756b2bdd1SGireesh Nagabhushana 			n -= txq->txb_next;
271856b2bdd1SGireesh Nagabhushana 			txq->txb_next = txq->txb_size - n;
271956b2bdd1SGireesh Nagabhushana 		}
272056b2bdd1SGireesh Nagabhushana 	}
272156b2bdd1SGireesh Nagabhushana 
272256b2bdd1SGireesh Nagabhushana 	for (n = txinfo->hdls_used; n > 0; n--) {
272356b2bdd1SGireesh Nagabhushana 		if (txq->tx_dhdl_pidx > 0)
272456b2bdd1SGireesh Nagabhushana 			txq->tx_dhdl_pidx--;
272556b2bdd1SGireesh Nagabhushana 		else
272656b2bdd1SGireesh Nagabhushana 			txq->tx_dhdl_pidx = txq->tx_dhdl_total - 1;
272756b2bdd1SGireesh Nagabhushana 		txq->tx_dhdl_avail++;
272856b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_unbind_handle(txq->tx_dhdl[txq->tx_dhdl_pidx]);
272956b2bdd1SGireesh Nagabhushana 	}
273056b2bdd1SGireesh Nagabhushana }
273156b2bdd1SGireesh Nagabhushana 
273256b2bdd1SGireesh Nagabhushana /*
273356b2bdd1SGireesh Nagabhushana  * Returns 0 to indicate that m has been accepted into a coalesced tx work
273456b2bdd1SGireesh Nagabhushana  * request.  It has either been folded into txpkts or txpkts was flushed and m
273556b2bdd1SGireesh Nagabhushana  * has started a new coalesced work request (as the first frame in a fresh
273656b2bdd1SGireesh Nagabhushana  * txpkts).
273756b2bdd1SGireesh Nagabhushana  *
273856b2bdd1SGireesh Nagabhushana  * Returns non-zero to indicate a failure - caller is responsible for
273956b2bdd1SGireesh Nagabhushana  * transmitting m, if there was anything in txpkts it has been flushed.
274056b2bdd1SGireesh Nagabhushana  */
274156b2bdd1SGireesh Nagabhushana static int
274256b2bdd1SGireesh Nagabhushana add_to_txpkts(struct sge_txq *txq, struct txpkts *txpkts, mblk_t *m,
274356b2bdd1SGireesh Nagabhushana     struct txinfo *txinfo)
274456b2bdd1SGireesh Nagabhushana {
274556b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
274656b2bdd1SGireesh Nagabhushana 	int can_coalesce;
274756b2bdd1SGireesh Nagabhushana 	struct tx_sdesc *txsd;
274856b2bdd1SGireesh Nagabhushana 	uint8_t flits;
274956b2bdd1SGireesh Nagabhushana 
275056b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);
275156b2bdd1SGireesh Nagabhushana 
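	/*
	 * A frame may be folded into the work request under construction
	 * only if it is not an LSO frame, its SGL fits within both the WR's
	 * flit budget (TX_WR_FLITS) and the descriptors still available (8
	 * flits each), and the WR's 16-bit payload length cannot overflow.
	 */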
275256b2bdd1SGireesh Nagabhushana 	if (txpkts->npkt > 0) {
275356b2bdd1SGireesh Nagabhushana 		flits = TXPKTS_PKT_HDR + txinfo->nflits;
275456b2bdd1SGireesh Nagabhushana 		can_coalesce = (txinfo->flags & HW_LSO) == 0 &&
275556b2bdd1SGireesh Nagabhushana 		    txpkts->nflits + flits <= TX_WR_FLITS &&
275656b2bdd1SGireesh Nagabhushana 		    txpkts->nflits + flits <= eq->avail * 8 &&
275756b2bdd1SGireesh Nagabhushana 		    txpkts->plen + txinfo->len < 65536;
275856b2bdd1SGireesh Nagabhushana 
275956b2bdd1SGireesh Nagabhushana 		if (can_coalesce != 0) {
276056b2bdd1SGireesh Nagabhushana 			txpkts->tail->b_next = m;
276156b2bdd1SGireesh Nagabhushana 			txpkts->tail = m;
276256b2bdd1SGireesh Nagabhushana 			txpkts->npkt++;
276356b2bdd1SGireesh Nagabhushana 			txpkts->nflits += flits;
276456b2bdd1SGireesh Nagabhushana 			txpkts->plen += txinfo->len;
276556b2bdd1SGireesh Nagabhushana 
276656b2bdd1SGireesh Nagabhushana 			txsd = &txq->sdesc[eq->pidx];
276756b2bdd1SGireesh Nagabhushana 			txsd->txb_used += txinfo->txb_used;
276856b2bdd1SGireesh Nagabhushana 			txsd->hdls_used += txinfo->hdls_used;
276956b2bdd1SGireesh Nagabhushana 
277056b2bdd1SGireesh Nagabhushana 			return (0);
277156b2bdd1SGireesh Nagabhushana 		}
277256b2bdd1SGireesh Nagabhushana 
277356b2bdd1SGireesh Nagabhushana 		/*
277456b2bdd1SGireesh Nagabhushana 		 * Couldn't coalesce m into txpkts.  The first order of business
277556b2bdd1SGireesh Nagabhushana 		 * is to send txpkts on its way.  Then we'll revisit m.
277656b2bdd1SGireesh Nagabhushana 		 */
277756b2bdd1SGireesh Nagabhushana 		write_txpkts_wr(txq, txpkts);
277856b2bdd1SGireesh Nagabhushana 	}
277956b2bdd1SGireesh Nagabhushana 
278056b2bdd1SGireesh Nagabhushana 	/*
278156b2bdd1SGireesh Nagabhushana 	 * Check if we can start a new coalesced tx work request with m as
278256b2bdd1SGireesh Nagabhushana 	 * the first packet in it.
278356b2bdd1SGireesh Nagabhushana 	 */
278456b2bdd1SGireesh Nagabhushana 
278556b2bdd1SGireesh Nagabhushana 	ASSERT(txpkts->npkt == 0);
278656b2bdd1SGireesh Nagabhushana 	ASSERT(txinfo->len < 65536);
278756b2bdd1SGireesh Nagabhushana 
278856b2bdd1SGireesh Nagabhushana 	flits = TXPKTS_WR_HDR + txinfo->nflits;
278956b2bdd1SGireesh Nagabhushana 	can_coalesce = (txinfo->flags & HW_LSO) == 0 &&
279056b2bdd1SGireesh Nagabhushana 	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
279156b2bdd1SGireesh Nagabhushana 
279256b2bdd1SGireesh Nagabhushana 	if (can_coalesce == 0)
279356b2bdd1SGireesh Nagabhushana 		return (EINVAL);
279456b2bdd1SGireesh Nagabhushana 
279556b2bdd1SGireesh Nagabhushana 	/*
279656b2bdd1SGireesh Nagabhushana 	 * Start a fresh coalesced tx WR with m as the first frame in it.
279756b2bdd1SGireesh Nagabhushana 	 */
279856b2bdd1SGireesh Nagabhushana 	txpkts->tail = m;
279956b2bdd1SGireesh Nagabhushana 	txpkts->npkt = 1;
280056b2bdd1SGireesh Nagabhushana 	txpkts->nflits = flits;
280156b2bdd1SGireesh Nagabhushana 	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
280256b2bdd1SGireesh Nagabhushana 	txpkts->plen = txinfo->len;
280356b2bdd1SGireesh Nagabhushana 
280456b2bdd1SGireesh Nagabhushana 	txsd = &txq->sdesc[eq->pidx];
280556b2bdd1SGireesh Nagabhushana 	txsd->m = m;
280656b2bdd1SGireesh Nagabhushana 	txsd->txb_used = txinfo->txb_used;
280756b2bdd1SGireesh Nagabhushana 	txsd->hdls_used = txinfo->hdls_used;
280856b2bdd1SGireesh Nagabhushana 
280956b2bdd1SGireesh Nagabhushana 	return (0);
281056b2bdd1SGireesh Nagabhushana }
281156b2bdd1SGireesh Nagabhushana 
281256b2bdd1SGireesh Nagabhushana /*
281356b2bdd1SGireesh Nagabhushana  * Note that write_txpkts_wr can never run out of hardware descriptors (but
281456b2bdd1SGireesh Nagabhushana  * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
281556b2bdd1SGireesh Nagabhushana  * coalescing only if sufficient hardware descriptors are available.
281656b2bdd1SGireesh Nagabhushana  */
281756b2bdd1SGireesh Nagabhushana static void
281856b2bdd1SGireesh Nagabhushana write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
281956b2bdd1SGireesh Nagabhushana {
282056b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
282156b2bdd1SGireesh Nagabhushana 	struct fw_eth_tx_pkts_wr *wr;
282256b2bdd1SGireesh Nagabhushana 	struct tx_sdesc *txsd;
282356b2bdd1SGireesh Nagabhushana 	uint32_t ctrl;
282456b2bdd1SGireesh Nagabhushana 	uint16_t ndesc;
282556b2bdd1SGireesh Nagabhushana 
282656b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* pidx, avail */
282756b2bdd1SGireesh Nagabhushana 
282856b2bdd1SGireesh Nagabhushana 	ndesc = howmany(txpkts->nflits, 8);
282956b2bdd1SGireesh Nagabhushana 
283056b2bdd1SGireesh Nagabhushana 	wr = (void *)&eq->desc[eq->pidx];
283156b2bdd1SGireesh Nagabhushana 	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
283256b2bdd1SGireesh Nagabhushana 	    V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
283356b2bdd1SGireesh Nagabhushana 	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
283456b2bdd1SGireesh Nagabhushana 	if (eq->avail == ndesc)
283556b2bdd1SGireesh Nagabhushana 		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
283656b2bdd1SGireesh Nagabhushana 	wr->equiq_to_len16 = cpu_to_be32(ctrl);
283756b2bdd1SGireesh Nagabhushana 	wr->plen = cpu_to_be16(txpkts->plen);
283856b2bdd1SGireesh Nagabhushana 	wr->npkt = txpkts->npkt;
283956b2bdd1SGireesh Nagabhushana 	wr->r3 = wr->type = 0;
284056b2bdd1SGireesh Nagabhushana 
284156b2bdd1SGireesh Nagabhushana 	/* Everything else already written */
284256b2bdd1SGireesh Nagabhushana 
284356b2bdd1SGireesh Nagabhushana 	txsd = &txq->sdesc[eq->pidx];
284456b2bdd1SGireesh Nagabhushana 	txsd->desc_used = ndesc;
284556b2bdd1SGireesh Nagabhushana 
284656b2bdd1SGireesh Nagabhushana 	txq->txb_used += txsd->txb_used / TXB_CHUNK;
284756b2bdd1SGireesh Nagabhushana 	txq->hdl_used += txsd->hdls_used;
284856b2bdd1SGireesh Nagabhushana 
284956b2bdd1SGireesh Nagabhushana 	ASSERT(eq->avail >= ndesc);
285056b2bdd1SGireesh Nagabhushana 
285156b2bdd1SGireesh Nagabhushana 	eq->pending += ndesc;
285256b2bdd1SGireesh Nagabhushana 	eq->avail -= ndesc;
285356b2bdd1SGireesh Nagabhushana 	eq->pidx += ndesc;
285456b2bdd1SGireesh Nagabhushana 	if (eq->pidx >= eq->cap)
285556b2bdd1SGireesh Nagabhushana 		eq->pidx -= eq->cap;
285656b2bdd1SGireesh Nagabhushana 
285756b2bdd1SGireesh Nagabhushana 	txq->txpkts_pkts += txpkts->npkt;
285856b2bdd1SGireesh Nagabhushana 	txq->txpkts_wrs++;
285956b2bdd1SGireesh Nagabhushana 	txpkts->npkt = 0;	/* emptied */
286056b2bdd1SGireesh Nagabhushana }
286156b2bdd1SGireesh Nagabhushana 
286256b2bdd1SGireesh Nagabhushana static int
286356b2bdd1SGireesh Nagabhushana write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, mblk_t *m,
286456b2bdd1SGireesh Nagabhushana     struct txinfo *txinfo)
286556b2bdd1SGireesh Nagabhushana {
286656b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
286756b2bdd1SGireesh Nagabhushana 	struct fw_eth_tx_pkt_wr *wr;
286856b2bdd1SGireesh Nagabhushana 	struct cpl_tx_pkt_core *cpl;
286956b2bdd1SGireesh Nagabhushana 	uint32_t ctrl;	/* used in many unrelated places */
287056b2bdd1SGireesh Nagabhushana 	uint64_t ctrl1;
287156b2bdd1SGireesh Nagabhushana 	int nflits, ndesc;
287256b2bdd1SGireesh Nagabhushana 	struct tx_sdesc *txsd;
287356b2bdd1SGireesh Nagabhushana 	caddr_t dst;
287456b2bdd1SGireesh Nagabhushana 
287556b2bdd1SGireesh Nagabhushana 	TXQ_LOCK_ASSERT_OWNED(txq);	/* pidx, avail */
287656b2bdd1SGireesh Nagabhushana 
287756b2bdd1SGireesh Nagabhushana 	/*
287856b2bdd1SGireesh Nagabhushana 	 * Do we have enough flits to send this frame out?
287956b2bdd1SGireesh Nagabhushana 	 */
288056b2bdd1SGireesh Nagabhushana 	ctrl = sizeof (struct cpl_tx_pkt_core);
288156b2bdd1SGireesh Nagabhushana 	if (txinfo->flags & HW_LSO) {
288256b2bdd1SGireesh Nagabhushana 		nflits = TXPKT_LSO_WR_HDR;
2883de483253SVishal Kulkarni 		ctrl += sizeof (struct cpl_tx_pkt_lso_core);
288456b2bdd1SGireesh Nagabhushana 	} else
288556b2bdd1SGireesh Nagabhushana 		nflits = TXPKT_WR_HDR;
288656b2bdd1SGireesh Nagabhushana 	if (txinfo->nsegs > 0)
288756b2bdd1SGireesh Nagabhushana 		nflits += txinfo->nflits;
288856b2bdd1SGireesh Nagabhushana 	else {
288956b2bdd1SGireesh Nagabhushana 		nflits += howmany(txinfo->len, 8);
289056b2bdd1SGireesh Nagabhushana 		ctrl += txinfo->len;
289156b2bdd1SGireesh Nagabhushana 	}
289256b2bdd1SGireesh Nagabhushana 	ndesc = howmany(nflits, 8);
289356b2bdd1SGireesh Nagabhushana 	if (ndesc > eq->avail)
289456b2bdd1SGireesh Nagabhushana 		return (ENOMEM);
289556b2bdd1SGireesh Nagabhushana 
289656b2bdd1SGireesh Nagabhushana 	/* Firmware work request header */
289756b2bdd1SGireesh Nagabhushana 	wr = (void *)&eq->desc[eq->pidx];
289856b2bdd1SGireesh Nagabhushana 	wr->op_immdlen = cpu_to_be32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
289956b2bdd1SGireesh Nagabhushana 	    V_FW_WR_IMMDLEN(ctrl));
290056b2bdd1SGireesh Nagabhushana 	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
290156b2bdd1SGireesh Nagabhushana 	if (eq->avail == ndesc)
290256b2bdd1SGireesh Nagabhushana 		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
290356b2bdd1SGireesh Nagabhushana 	wr->equiq_to_len16 = cpu_to_be32(ctrl);
290456b2bdd1SGireesh Nagabhushana 	wr->r3 = 0;
290556b2bdd1SGireesh Nagabhushana 
290656b2bdd1SGireesh Nagabhushana 	if (txinfo->flags & HW_LSO) {
2907d75c6062SRobert Mustacchi 		uint16_t etype;
2908de483253SVishal Kulkarni 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
290956b2bdd1SGireesh Nagabhushana 		char *p = (void *)m->b_rptr;
291056b2bdd1SGireesh Nagabhushana 		ctrl = V_LSO_OPCODE((u32)CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
291156b2bdd1SGireesh Nagabhushana 		    F_LSO_LAST_SLICE;
291256b2bdd1SGireesh Nagabhushana 
2913d75c6062SRobert Mustacchi 		etype = ntohs(((struct ether_header *)p)->ether_type);
2914d75c6062SRobert Mustacchi 		if (etype == ETHERTYPE_VLAN) {
291556b2bdd1SGireesh Nagabhushana 			ctrl |= V_LSO_ETHHDR_LEN(1);
2916d75c6062SRobert Mustacchi 			etype = ntohs(((struct ether_vlan_header *)p)->ether_type);
291756b2bdd1SGireesh Nagabhushana 			p += sizeof (struct ether_vlan_header);
2918d75c6062SRobert Mustacchi 		} else {
291956b2bdd1SGireesh Nagabhushana 			p += sizeof (struct ether_header);
2920d75c6062SRobert Mustacchi 		}
2921d75c6062SRobert Mustacchi 
2922d75c6062SRobert Mustacchi 		switch (etype) {
2923d75c6062SRobert Mustacchi 		case ETHERTYPE_IP:
2924d75c6062SRobert Mustacchi 			ctrl |= V_LSO_IPHDR_LEN(IPH_HDR_LENGTH(p) / 4);
2925d75c6062SRobert Mustacchi 			p += IPH_HDR_LENGTH(p);
2926d75c6062SRobert Mustacchi 			break;
2927d75c6062SRobert Mustacchi 		case ETHERTYPE_IPV6:
2928d75c6062SRobert Mustacchi 			ctrl |= F_LSO_IPV6;
2929d75c6062SRobert Mustacchi 			ctrl |= V_LSO_IPHDR_LEN(sizeof (ip6_t) / 4);
2930d75c6062SRobert Mustacchi 			p += sizeof (ip6_t);
			break;
2931d75c6062SRobert Mustacchi 		default:
2932d75c6062SRobert Mustacchi 			break;
2933d75c6062SRobert Mustacchi 		}
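		/*
		 * The LSO header-length fields above are presumably in
		 * 4-byte units: a plain 20-byte IPv4 header yields
		 * V_LSO_IPHDR_LEN(5), a VLAN tag widens the Ethernet
		 * header by one unit (V_LSO_ETHHDR_LEN(1)), and the TCP
		 * header length below is encoded the same way.
		 */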
293456b2bdd1SGireesh Nagabhushana 
293556b2bdd1SGireesh Nagabhushana 		ctrl |= V_LSO_TCPHDR_LEN(TCP_HDR_LENGTH((tcph_t *)p) / 4);
293656b2bdd1SGireesh Nagabhushana 
293756b2bdd1SGireesh Nagabhushana 		lso->lso_ctrl = cpu_to_be32(ctrl);
293856b2bdd1SGireesh Nagabhushana 		lso->ipid_ofst = cpu_to_be16(0);
293956b2bdd1SGireesh Nagabhushana 		lso->mss = cpu_to_be16(txinfo->mss);
294056b2bdd1SGireesh Nagabhushana 		lso->seqno_offset = cpu_to_be32(0);
2941de483253SVishal Kulkarni 		if (is_t4(pi->adapter->params.chip))
2942de483253SVishal Kulkarni 			lso->len = cpu_to_be32(txinfo->len);
2943de483253SVishal Kulkarni 		else
2944de483253SVishal Kulkarni 			lso->len = cpu_to_be32(V_LSO_T5_XFER_SIZE(txinfo->len));
294556b2bdd1SGireesh Nagabhushana 
294656b2bdd1SGireesh Nagabhushana 		cpl = (void *)(lso + 1);
294756b2bdd1SGireesh Nagabhushana 
294856b2bdd1SGireesh Nagabhushana 		txq->tso_wrs++;
294956b2bdd1SGireesh Nagabhushana 	} else
295056b2bdd1SGireesh Nagabhushana 		cpl = (void *)(wr + 1);
295156b2bdd1SGireesh Nagabhushana 
295256b2bdd1SGireesh Nagabhushana 	/* Checksum offload */
295356b2bdd1SGireesh Nagabhushana 	ctrl1 = 0;
295456b2bdd1SGireesh Nagabhushana 	if (!(txinfo->flags & HCK_IPV4_HDRCKSUM))
295556b2bdd1SGireesh Nagabhushana 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
295656b2bdd1SGireesh Nagabhushana 	if (!(txinfo->flags & HCK_FULLCKSUM))
295756b2bdd1SGireesh Nagabhushana 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
295856b2bdd1SGireesh Nagabhushana 	if (ctrl1 == 0)
295956b2bdd1SGireesh Nagabhushana 		txq->txcsum++;	/* some hardware assistance provided */
296056b2bdd1SGireesh Nagabhushana 
296156b2bdd1SGireesh Nagabhushana 	/* CPL header */
296256b2bdd1SGireesh Nagabhushana 	cpl->ctrl0 = cpu_to_be32(V_TXPKT_OPCODE(CPL_TX_PKT) |
296356b2bdd1SGireesh Nagabhushana 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
296456b2bdd1SGireesh Nagabhushana 	cpl->pack = 0;
296556b2bdd1SGireesh Nagabhushana 	cpl->len = cpu_to_be16(txinfo->len);
296656b2bdd1SGireesh Nagabhushana 	cpl->ctrl1 = cpu_to_be64(ctrl1);
296756b2bdd1SGireesh Nagabhushana 
296856b2bdd1SGireesh Nagabhushana 	/* Software descriptor */
296956b2bdd1SGireesh Nagabhushana 	txsd = &txq->sdesc[eq->pidx];
297056b2bdd1SGireesh Nagabhushana 	txsd->m = m;
297156b2bdd1SGireesh Nagabhushana 	txsd->txb_used = txinfo->txb_used;
297256b2bdd1SGireesh Nagabhushana 	txsd->hdls_used = txinfo->hdls_used;
297356b2bdd1SGireesh Nagabhushana 	/* LINTED: E_ASSIGN_NARROW_CONV */
297456b2bdd1SGireesh Nagabhushana 	txsd->desc_used = ndesc;
297556b2bdd1SGireesh Nagabhushana 
297656b2bdd1SGireesh Nagabhushana 	txq->txb_used += txinfo->txb_used / TXB_CHUNK;
297756b2bdd1SGireesh Nagabhushana 	txq->hdl_used += txinfo->hdls_used;
297856b2bdd1SGireesh Nagabhushana 
297956b2bdd1SGireesh Nagabhushana 	eq->pending += ndesc;
298056b2bdd1SGireesh Nagabhushana 	eq->avail -= ndesc;
298156b2bdd1SGireesh Nagabhushana 	eq->pidx += ndesc;
298256b2bdd1SGireesh Nagabhushana 	if (eq->pidx >= eq->cap)
298356b2bdd1SGireesh Nagabhushana 		eq->pidx -= eq->cap;
298456b2bdd1SGireesh Nagabhushana 
298556b2bdd1SGireesh Nagabhushana 	/* SGL */
298656b2bdd1SGireesh Nagabhushana 	dst = (void *)(cpl + 1);
298756b2bdd1SGireesh Nagabhushana 	if (txinfo->nsegs > 0) {
298856b2bdd1SGireesh Nagabhushana 		txq->sgl_wrs++;
298956b2bdd1SGireesh Nagabhushana 		copy_to_txd(eq, (void *)&txinfo->sgl, &dst, txinfo->nflits * 8);
299056b2bdd1SGireesh Nagabhushana 
299156b2bdd1SGireesh Nagabhushana 		/* Need to zero-pad to a 16 byte boundary if not on one */
299256b2bdd1SGireesh Nagabhushana 		if ((uintptr_t)dst & 0xf)
299356b2bdd1SGireesh Nagabhushana 			/* LINTED: E_BAD_PTR_CAST_ALIGN */
299456b2bdd1SGireesh Nagabhushana 			*(uint64_t *)dst = 0;
299556b2bdd1SGireesh Nagabhushana 
299656b2bdd1SGireesh Nagabhushana 	} else {
299756b2bdd1SGireesh Nagabhushana 		txq->imm_wrs++;
299856b2bdd1SGireesh Nagabhushana #ifdef DEBUG
299956b2bdd1SGireesh Nagabhushana 		ctrl = txinfo->len;
300056b2bdd1SGireesh Nagabhushana #endif
300156b2bdd1SGireesh Nagabhushana 		for (; m; m = m->b_cont) {
300256b2bdd1SGireesh Nagabhushana 			copy_to_txd(eq, (void *)m->b_rptr, &dst, MBLKL(m));
300356b2bdd1SGireesh Nagabhushana #ifdef DEBUG
300456b2bdd1SGireesh Nagabhushana 			ctrl -= MBLKL(m);
300556b2bdd1SGireesh Nagabhushana #endif
300656b2bdd1SGireesh Nagabhushana 		}
300756b2bdd1SGireesh Nagabhushana 		ASSERT(ctrl == 0);
300856b2bdd1SGireesh Nagabhushana 	}
300956b2bdd1SGireesh Nagabhushana 
301056b2bdd1SGireesh Nagabhushana 	txq->txpkt_wrs++;
301156b2bdd1SGireesh Nagabhushana 	return (0);
301256b2bdd1SGireesh Nagabhushana }
301356b2bdd1SGireesh Nagabhushana 
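/*
 * Append one frame to an already-open coalesced WR: a ULP_TX_PKT master
 * command, a ULP_TX_SC_IMM subcommand, the CPL, and finally the frame's
 * SGL.  Unlike a fresh WR this sequence can straddle the end of the
 * descriptor ring, hence the explicit wrap checks against eq.spg below.
 */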
301456b2bdd1SGireesh Nagabhushana static inline void
301556b2bdd1SGireesh Nagabhushana write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
301656b2bdd1SGireesh Nagabhushana     struct txpkts *txpkts, struct txinfo *txinfo)
301756b2bdd1SGireesh Nagabhushana {
301856b2bdd1SGireesh Nagabhushana 	struct ulp_txpkt *ulpmc;
301956b2bdd1SGireesh Nagabhushana 	struct ulptx_idata *ulpsc;
302056b2bdd1SGireesh Nagabhushana 	struct cpl_tx_pkt_core *cpl;
302156b2bdd1SGireesh Nagabhushana 	uintptr_t flitp, start, end;
302256b2bdd1SGireesh Nagabhushana 	uint64_t ctrl;
302356b2bdd1SGireesh Nagabhushana 	caddr_t dst;
302456b2bdd1SGireesh Nagabhushana 
302556b2bdd1SGireesh Nagabhushana 	ASSERT(txpkts->npkt > 0);
302656b2bdd1SGireesh Nagabhushana 
302756b2bdd1SGireesh Nagabhushana 	start = (uintptr_t)txq->eq.desc;
302856b2bdd1SGireesh Nagabhushana 	end = (uintptr_t)txq->eq.spg;
302956b2bdd1SGireesh Nagabhushana 
303056b2bdd1SGireesh Nagabhushana 	/* Checksum offload */
303156b2bdd1SGireesh Nagabhushana 	ctrl = 0;
303256b2bdd1SGireesh Nagabhushana 	if (!(txinfo->flags & HCK_IPV4_HDRCKSUM))
303356b2bdd1SGireesh Nagabhushana 		ctrl |= F_TXPKT_IPCSUM_DIS;
303456b2bdd1SGireesh Nagabhushana 	if (!(txinfo->flags & HCK_FULLCKSUM))
303556b2bdd1SGireesh Nagabhushana 		ctrl |= F_TXPKT_L4CSUM_DIS;
303656b2bdd1SGireesh Nagabhushana 	if (ctrl == 0)
303756b2bdd1SGireesh Nagabhushana 		txq->txcsum++;	/* some hardware assistance provided */
303856b2bdd1SGireesh Nagabhushana 
303956b2bdd1SGireesh Nagabhushana 	/*
304056b2bdd1SGireesh Nagabhushana 	 * The previous packet's SGL must have ended at a 16 byte boundary (this
304156b2bdd1SGireesh Nagabhushana 	 * is required by the firmware/hardware).  It follows that flitp cannot
304256b2bdd1SGireesh Nagabhushana 	 * wrap around between the ULPTX master command and ULPTX subcommand (8
304356b2bdd1SGireesh Nagabhushana 	 * bytes each), and that it can not wrap around in the middle of the
304456b2bdd1SGireesh Nagabhushana 	 * cpl_tx_pkt_core either.
304556b2bdd1SGireesh Nagabhushana 	 */
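	/*
	 * Concretely: sizeof (*ulpmc) + sizeof (*ulpsc) is 16 bytes, so a
	 * 16-byte aligned flitp can only hit the end of the ring on the
	 * boundary between the subcommand and the CPL, which is exactly
	 * where the first wrap check below sits.
	 */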
304656b2bdd1SGireesh Nagabhushana 	flitp = (uintptr_t)txpkts->flitp;
304756b2bdd1SGireesh Nagabhushana 	ASSERT((flitp & 0xf) == 0);
304856b2bdd1SGireesh Nagabhushana 
304956b2bdd1SGireesh Nagabhushana 	/* ULP master command */
305056b2bdd1SGireesh Nagabhushana 	ulpmc = (void *)flitp;
305156b2bdd1SGireesh Nagabhushana 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
305256b2bdd1SGireesh Nagabhushana 	ulpmc->len = htonl(howmany(sizeof (*ulpmc) + sizeof (*ulpsc) +
305356b2bdd1SGireesh Nagabhushana 	    sizeof (*cpl) + 8 * txinfo->nflits, 16));
305456b2bdd1SGireesh Nagabhushana 
305556b2bdd1SGireesh Nagabhushana 	/* ULP subcommand */
305656b2bdd1SGireesh Nagabhushana 	ulpsc = (void *)(ulpmc + 1);
305756b2bdd1SGireesh Nagabhushana 	ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
305856b2bdd1SGireesh Nagabhushana 	    F_ULP_TX_SC_MORE);
305956b2bdd1SGireesh Nagabhushana 	ulpsc->len = cpu_to_be32(sizeof (struct cpl_tx_pkt_core));
306056b2bdd1SGireesh Nagabhushana 
306156b2bdd1SGireesh Nagabhushana 	flitp += sizeof (*ulpmc) + sizeof (*ulpsc);
306256b2bdd1SGireesh Nagabhushana 	if (flitp == end)
306356b2bdd1SGireesh Nagabhushana 		flitp = start;
306456b2bdd1SGireesh Nagabhushana 
306556b2bdd1SGireesh Nagabhushana 	/* CPL_TX_PKT */
306656b2bdd1SGireesh Nagabhushana 	cpl = (void *)flitp;
306756b2bdd1SGireesh Nagabhushana 	cpl->ctrl0 = cpu_to_be32(V_TXPKT_OPCODE(CPL_TX_PKT) |
306856b2bdd1SGireesh Nagabhushana 	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
306956b2bdd1SGireesh Nagabhushana 	cpl->pack = 0;
307056b2bdd1SGireesh Nagabhushana 	cpl->len = cpu_to_be16(txinfo->len);
307156b2bdd1SGireesh Nagabhushana 	cpl->ctrl1 = cpu_to_be64(ctrl);
307256b2bdd1SGireesh Nagabhushana 
307356b2bdd1SGireesh Nagabhushana 	flitp += sizeof (*cpl);
307456b2bdd1SGireesh Nagabhushana 	if (flitp == end)
307556b2bdd1SGireesh Nagabhushana 		flitp = start;
307656b2bdd1SGireesh Nagabhushana 
307756b2bdd1SGireesh Nagabhushana 	/* SGL for this frame */
307856b2bdd1SGireesh Nagabhushana 	dst = (caddr_t)flitp;
307956b2bdd1SGireesh Nagabhushana 	copy_to_txd(&txq->eq, (void *)&txinfo->sgl, &dst, txinfo->nflits * 8);
308056b2bdd1SGireesh Nagabhushana 	flitp = (uintptr_t)dst;
308156b2bdd1SGireesh Nagabhushana 
308256b2bdd1SGireesh Nagabhushana 	/* Zero pad and advance to a 16 byte boundary if not already at one. */
308356b2bdd1SGireesh Nagabhushana 	if (flitp & 0xf) {
308456b2bdd1SGireesh Nagabhushana 
308556b2bdd1SGireesh Nagabhushana 		/* no matter what, flitp should be on an 8 byte boundary */
308656b2bdd1SGireesh Nagabhushana 		ASSERT((flitp & 0x7) == 0);
308756b2bdd1SGireesh Nagabhushana 
308856b2bdd1SGireesh Nagabhushana 		*(uint64_t *)flitp = 0;
308956b2bdd1SGireesh Nagabhushana 		flitp += sizeof (uint64_t);
309056b2bdd1SGireesh Nagabhushana 		txpkts->nflits++;
309156b2bdd1SGireesh Nagabhushana 	}
309256b2bdd1SGireesh Nagabhushana 
309356b2bdd1SGireesh Nagabhushana 	if (flitp == end)
309456b2bdd1SGireesh Nagabhushana 		flitp = start;
309556b2bdd1SGireesh Nagabhushana 
309656b2bdd1SGireesh Nagabhushana 	txpkts->flitp = (void *)flitp;
309756b2bdd1SGireesh Nagabhushana }
309856b2bdd1SGireesh Nagabhushana 
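/*
 * Copy len bytes into the descriptor ring at *to, wrapping past the
 * status page back to the start of the ring when needed, and advance
 * *to past the bytes written.  E.g. with 24 bytes left before eq->spg,
 * a 40-byte copy places 24 bytes at the end of the ring and the
 * remaining 16 at eq->desc[0].
 */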
309956b2bdd1SGireesh Nagabhushana static inline void
310056b2bdd1SGireesh Nagabhushana copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
310156b2bdd1SGireesh Nagabhushana {
310256b2bdd1SGireesh Nagabhushana 	if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
310356b2bdd1SGireesh Nagabhushana 		bcopy(from, *to, len);
310456b2bdd1SGireesh Nagabhushana 		(*to) += len;
310556b2bdd1SGireesh Nagabhushana 	} else {
310656b2bdd1SGireesh Nagabhushana 		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
310756b2bdd1SGireesh Nagabhushana 
310856b2bdd1SGireesh Nagabhushana 		bcopy(from, *to, portion);
310956b2bdd1SGireesh Nagabhushana 		from += portion;
311056b2bdd1SGireesh Nagabhushana 		portion = len - portion;	/* remaining */
311156b2bdd1SGireesh Nagabhushana 		bcopy(from, (void *)eq->desc, portion);
311256b2bdd1SGireesh Nagabhushana 		(*to) = (caddr_t)eq->desc + portion;
311356b2bdd1SGireesh Nagabhushana 	}
311456b2bdd1SGireesh Nagabhushana }
311556b2bdd1SGireesh Nagabhushana 
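/*
 * Ring the doorbell for this eq using the cheapest mechanism the queue
 * supports: a user doorbell (UDB), a write-combined work request (WCWR,
 * only when exactly one descriptor is pending), a write-combined user
 * doorbell (UDBWC), or the kernel doorbell register (KDB).  The pending
 * descriptors are DMA-synced for the device before the doorbell write.
 */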
311656b2bdd1SGireesh Nagabhushana static inline void
311756b2bdd1SGireesh Nagabhushana ring_tx_db(struct adapter *sc, struct sge_eq *eq)
311856b2bdd1SGireesh Nagabhushana {
3119de483253SVishal Kulkarni 	int val, db_mode;
3120de483253SVishal Kulkarni 	u_int db = eq->doorbells;
3121de483253SVishal Kulkarni 
3122de483253SVishal Kulkarni 	if (eq->pending > 1)
3123de483253SVishal Kulkarni 		db &= ~DOORBELL_WCWR;
3124de483253SVishal Kulkarni 
312556b2bdd1SGireesh Nagabhushana 	if (eq->pending > eq->pidx) {
312656b2bdd1SGireesh Nagabhushana 		int offset = eq->cap - (eq->pending - eq->pidx);
312756b2bdd1SGireesh Nagabhushana 
312856b2bdd1SGireesh Nagabhushana 		/* pidx has wrapped around since last doorbell */
312956b2bdd1SGireesh Nagabhushana 
313056b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(eq->desc_dhdl,
313156b2bdd1SGireesh Nagabhushana 		    offset * sizeof (struct tx_desc), 0,
313256b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SYNC_FORDEV);
313356b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(eq->desc_dhdl,
313456b2bdd1SGireesh Nagabhushana 		    0, eq->pidx * sizeof (struct tx_desc),
313556b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SYNC_FORDEV);
313656b2bdd1SGireesh Nagabhushana 	} else if (eq->pending > 0) {
313756b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(eq->desc_dhdl,
313856b2bdd1SGireesh Nagabhushana 		    (eq->pidx - eq->pending) * sizeof (struct tx_desc),
313956b2bdd1SGireesh Nagabhushana 		    eq->pending * sizeof (struct tx_desc),
314056b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SYNC_FORDEV);
314156b2bdd1SGireesh Nagabhushana 	}
314256b2bdd1SGireesh Nagabhushana 
314356b2bdd1SGireesh Nagabhushana 	membar_producer();
314456b2bdd1SGireesh Nagabhushana 
3145de483253SVishal Kulkarni 	if (is_t4(sc->params.chip))
3146de483253SVishal Kulkarni 		val = V_PIDX(eq->pending);
3147de483253SVishal Kulkarni 	else
3148de483253SVishal Kulkarni 		val = V_PIDX_T5(eq->pending);
3149de483253SVishal Kulkarni 
3150de483253SVishal Kulkarni 	db_mode = (1 << (ffs(db) - 1));
3151de483253SVishal Kulkarni 	switch (db_mode) {
3152de483253SVishal Kulkarni 		case DOORBELL_UDB:
3153de483253SVishal Kulkarni 			*eq->udb = LE_32(V_QID(eq->udb_qid) | val);
3154de483253SVishal Kulkarni 			break;
3155de483253SVishal Kulkarni 
31564e0c5effSToomas Soome 		case DOORBELL_WCWR:
3157de483253SVishal Kulkarni 			{
3158de483253SVishal Kulkarni 				volatile uint64_t *dst, *src;
3159de483253SVishal Kulkarni 				int i;
3160de483253SVishal Kulkarni 				/*
3161de483253SVishal Kulkarni 				 * Queues whose 128B doorbell segment fits in
3162de483253SVishal Kulkarni 				 * the page do not use relative qid
3163de483253SVishal Kulkarni 				 * (udb_qid is always 0).  Only queues with
3164de483253SVishal Kulkarni 				 * doorbell segments can do WCWR.
3165de483253SVishal Kulkarni 				 */
3166de483253SVishal Kulkarni 				ASSERT(eq->udb_qid == 0 && eq->pending == 1);
3167de483253SVishal Kulkarni 
3168de483253SVishal Kulkarni 				dst = (volatile void *)((uintptr_t)eq->udb +
3169de483253SVishal Kulkarni 				    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
3170de483253SVishal Kulkarni 				i = eq->pidx ? eq->pidx - 1 : eq->cap - 1;
3171de483253SVishal Kulkarni 				src = (void *)&eq->desc[i];
3172de483253SVishal Kulkarni 				while (src != (void *)&eq->desc[i + 1])
3173de483253SVishal Kulkarni 					*dst++ = *src++;
3174de483253SVishal Kulkarni 				membar_producer();
3175de483253SVishal Kulkarni 				break;
3176de483253SVishal Kulkarni 			}
3177de483253SVishal Kulkarni 
3178de483253SVishal Kulkarni 		case DOORBELL_UDBWC:
3179de483253SVishal Kulkarni 			*eq->udb = LE_32(V_QID(eq->udb_qid) | val);
3180de483253SVishal Kulkarni 			membar_producer();
3181de483253SVishal Kulkarni 			break;
318256b2bdd1SGireesh Nagabhushana 
3183de483253SVishal Kulkarni 		case DOORBELL_KDB:
3184de483253SVishal Kulkarni 			t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
3185de483253SVishal Kulkarni 			    V_QID(eq->cntxt_id) | val);
3186de483253SVishal Kulkarni 			break;
3187de483253SVishal Kulkarni 	}
31884e0c5effSToomas Soome 
318956b2bdd1SGireesh Nagabhushana 	eq->pending = 0;
319056b2bdd1SGireesh Nagabhushana }
319156b2bdd1SGireesh Nagabhushana 
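/*
 * Reclaim up to "howmany" tx descriptors that the hardware has consumed,
 * using the cidx in the status page as a stable snapshot of hardware
 * progress.  Frees the mblk chains, tx copy buffers, and DMA handles
 * recorded in the software descriptors, and returns the number of
 * hardware descriptors actually reclaimed.
 */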
319256b2bdd1SGireesh Nagabhushana static int
319356b2bdd1SGireesh Nagabhushana reclaim_tx_descs(struct sge_txq *txq, int howmany)
319456b2bdd1SGireesh Nagabhushana {
319556b2bdd1SGireesh Nagabhushana 	struct tx_sdesc *txsd;
319656b2bdd1SGireesh Nagabhushana 	uint_t cidx, can_reclaim, reclaimed, txb_freed, hdls_freed;
319756b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
319856b2bdd1SGireesh Nagabhushana 
319956b2bdd1SGireesh Nagabhushana 	EQ_LOCK_ASSERT_OWNED(eq);
320056b2bdd1SGireesh Nagabhushana 
320156b2bdd1SGireesh Nagabhushana 	cidx = eq->spg->cidx;	/* stable snapshot */
320256b2bdd1SGireesh Nagabhushana 	cidx = be16_to_cpu(cidx);
320356b2bdd1SGireesh Nagabhushana 
320456b2bdd1SGireesh Nagabhushana 	if (cidx >= eq->cidx)
320556b2bdd1SGireesh Nagabhushana 		can_reclaim = cidx - eq->cidx;
320656b2bdd1SGireesh Nagabhushana 	else
320756b2bdd1SGireesh Nagabhushana 		can_reclaim = cidx + eq->cap - eq->cidx;
320856b2bdd1SGireesh Nagabhushana 
320956b2bdd1SGireesh Nagabhushana 	if (can_reclaim == 0)
321056b2bdd1SGireesh Nagabhushana 		return (0);
321156b2bdd1SGireesh Nagabhushana 
321256b2bdd1SGireesh Nagabhushana 	txb_freed = hdls_freed = reclaimed = 0;
321356b2bdd1SGireesh Nagabhushana 	do {
321456b2bdd1SGireesh Nagabhushana 		int ndesc;
321556b2bdd1SGireesh Nagabhushana 
321656b2bdd1SGireesh Nagabhushana 		txsd = &txq->sdesc[eq->cidx];
321756b2bdd1SGireesh Nagabhushana 		ndesc = txsd->desc_used;
321856b2bdd1SGireesh Nagabhushana 
321956b2bdd1SGireesh Nagabhushana 		/* Firmware doesn't return "partial" credits. */
322056b2bdd1SGireesh Nagabhushana 		ASSERT(can_reclaim >= ndesc);
322156b2bdd1SGireesh Nagabhushana 
322256b2bdd1SGireesh Nagabhushana 		/*
322356b2bdd1SGireesh Nagabhushana 		 * We always keep mblk around, even for immediate data.  If mblk
322456b2bdd1SGireesh Nagabhushana 		 * is NULL, this has to be the software descriptor for a credit
322556b2bdd1SGireesh Nagabhushana 		 * flush work request.
322656b2bdd1SGireesh Nagabhushana 		 */
322756b2bdd1SGireesh Nagabhushana 		if (txsd->m != NULL)
322856b2bdd1SGireesh Nagabhushana 			freemsgchain(txsd->m);
322956b2bdd1SGireesh Nagabhushana #ifdef DEBUG
323056b2bdd1SGireesh Nagabhushana 		else {
323156b2bdd1SGireesh Nagabhushana 			ASSERT(txsd->txb_used == 0);
323256b2bdd1SGireesh Nagabhushana 			ASSERT(txsd->hdls_used == 0);
323356b2bdd1SGireesh Nagabhushana 			ASSERT(ndesc == 1);
323456b2bdd1SGireesh Nagabhushana 		}
323556b2bdd1SGireesh Nagabhushana #endif
323656b2bdd1SGireesh Nagabhushana 
323756b2bdd1SGireesh Nagabhushana 		txb_freed += txsd->txb_used;
323856b2bdd1SGireesh Nagabhushana 		hdls_freed += txsd->hdls_used;
323956b2bdd1SGireesh Nagabhushana 		reclaimed += ndesc;
324056b2bdd1SGireesh Nagabhushana 
324156b2bdd1SGireesh Nagabhushana 		eq->cidx += ndesc;
324256b2bdd1SGireesh Nagabhushana 		if (eq->cidx >= eq->cap)
324356b2bdd1SGireesh Nagabhushana 			eq->cidx -= eq->cap;
324456b2bdd1SGireesh Nagabhushana 
324556b2bdd1SGireesh Nagabhushana 		can_reclaim -= ndesc;
324656b2bdd1SGireesh Nagabhushana 
324756b2bdd1SGireesh Nagabhushana 	} while (can_reclaim && reclaimed < howmany);
324856b2bdd1SGireesh Nagabhushana 
324956b2bdd1SGireesh Nagabhushana 	eq->avail += reclaimed;
325056b2bdd1SGireesh Nagabhushana 	ASSERT(eq->avail < eq->cap);	/* avail tops out at (cap - 1) */
325156b2bdd1SGireesh Nagabhushana 
325256b2bdd1SGireesh Nagabhushana 	txq->txb_avail += txb_freed;
325356b2bdd1SGireesh Nagabhushana 
325456b2bdd1SGireesh Nagabhushana 	txq->tx_dhdl_avail += hdls_freed;
325556b2bdd1SGireesh Nagabhushana 	ASSERT(txq->tx_dhdl_avail <= txq->tx_dhdl_total);
325656b2bdd1SGireesh Nagabhushana 	for (; hdls_freed; hdls_freed--) {
325756b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_unbind_handle(txq->tx_dhdl[txq->tx_dhdl_cidx]);
325856b2bdd1SGireesh Nagabhushana 		if (++txq->tx_dhdl_cidx == txq->tx_dhdl_total)
325956b2bdd1SGireesh Nagabhushana 			txq->tx_dhdl_cidx = 0;
326056b2bdd1SGireesh Nagabhushana 	}
326156b2bdd1SGireesh Nagabhushana 
326256b2bdd1SGireesh Nagabhushana 	return (reclaimed);
326356b2bdd1SGireesh Nagabhushana }
326456b2bdd1SGireesh Nagabhushana 
326556b2bdd1SGireesh Nagabhushana static void
326656b2bdd1SGireesh Nagabhushana write_txqflush_wr(struct sge_txq *txq)
326756b2bdd1SGireesh Nagabhushana {
326856b2bdd1SGireesh Nagabhushana 	struct sge_eq *eq = &txq->eq;
326956b2bdd1SGireesh Nagabhushana 	struct fw_eq_flush_wr *wr;
327056b2bdd1SGireesh Nagabhushana 	struct tx_sdesc *txsd;
327156b2bdd1SGireesh Nagabhushana 
327256b2bdd1SGireesh Nagabhushana 	EQ_LOCK_ASSERT_OWNED(eq);
327356b2bdd1SGireesh Nagabhushana 	ASSERT(eq->avail > 0);
327456b2bdd1SGireesh Nagabhushana 
327556b2bdd1SGireesh Nagabhushana 	wr = (void *)&eq->desc[eq->pidx];
327656b2bdd1SGireesh Nagabhushana 	bzero(wr, sizeof (*wr));
327756b2bdd1SGireesh Nagabhushana 	wr->opcode = FW_EQ_FLUSH_WR;
327856b2bdd1SGireesh Nagabhushana 	wr->equiq_to_len16 = cpu_to_be32(V_FW_WR_LEN16(sizeof (*wr) / 16) |
327956b2bdd1SGireesh Nagabhushana 	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
328056b2bdd1SGireesh Nagabhushana 
328156b2bdd1SGireesh Nagabhushana 	txsd = &txq->sdesc[eq->pidx];
328256b2bdd1SGireesh Nagabhushana 	txsd->m = NULL;
328356b2bdd1SGireesh Nagabhushana 	txsd->txb_used = 0;
328456b2bdd1SGireesh Nagabhushana 	txsd->hdls_used = 0;
328556b2bdd1SGireesh Nagabhushana 	txsd->desc_used = 1;
328656b2bdd1SGireesh Nagabhushana 
328756b2bdd1SGireesh Nagabhushana 	eq->pending++;
328856b2bdd1SGireesh Nagabhushana 	eq->avail--;
328956b2bdd1SGireesh Nagabhushana 	if (++eq->pidx == eq->cap)
329056b2bdd1SGireesh Nagabhushana 		eq->pidx = 0;
329156b2bdd1SGireesh Nagabhushana }
329256b2bdd1SGireesh Nagabhushana 
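/*
 * Receive one ethernet frame.  T6 chips report a compressed rx error
 * vector (params.tp.rx_pkt_encap); older chips use err_vec as-is.  A
 * frame that passed hardware checksum verification and is not an IP
 * fragment is marked fullsum-OK so the stack can skip software
 * checksumming.
 */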
329356b2bdd1SGireesh Nagabhushana static int
329456b2bdd1SGireesh Nagabhushana t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
329556b2bdd1SGireesh Nagabhushana {
32963dde7c95SVishal Kulkarni 	bool csum_ok;
32973dde7c95SVishal Kulkarni 	uint16_t err_vec;
329856b2bdd1SGireesh Nagabhushana 	struct sge_rxq *rxq = (void *)iq;
329956b2bdd1SGireesh Nagabhushana 	struct mblk_pair chain = {0};
33003dde7c95SVishal Kulkarni 	struct adapter *sc = iq->adapter;
330156b2bdd1SGireesh Nagabhushana 	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
330256b2bdd1SGireesh Nagabhushana 
330356b2bdd1SGireesh Nagabhushana 	iq->intr_next = iq->intr_params;
330456b2bdd1SGireesh Nagabhushana 
33053dde7c95SVishal Kulkarni 	m->b_rptr += sc->sge.pktshift;
330656b2bdd1SGireesh Nagabhushana 
33073dde7c95SVishal Kulkarni 	/* Compressed error vector is enabled for T6 only */
33083dde7c95SVishal Kulkarni 	if (sc->params.tp.rx_pkt_encap)
33093dde7c95SVishal Kulkarni 		/* It is enabled only in T6 config file */
33103dde7c95SVishal Kulkarni 		err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
33113dde7c95SVishal Kulkarni 	else
33123dde7c95SVishal Kulkarni 		err_vec = ntohs(cpl->err_vec);
33133dde7c95SVishal Kulkarni 
33143dde7c95SVishal Kulkarni 	csum_ok = cpl->csum_calc && !err_vec;
331556b2bdd1SGireesh Nagabhushana 	/* TODO: what about cpl->ip_frag? */
33163dde7c95SVishal Kulkarni 	if (csum_ok && !cpl->ip_frag) {
331756b2bdd1SGireesh Nagabhushana 		mac_hcksum_set(m, 0, 0, 0, 0xffff,
331856b2bdd1SGireesh Nagabhushana 		    HCK_FULLCKSUM_OK | HCK_FULLCKSUM |
331956b2bdd1SGireesh Nagabhushana 		    HCK_IPV4_HDRCKSUM_OK);
332056b2bdd1SGireesh Nagabhushana 		rxq->rxcsum++;
332156b2bdd1SGireesh Nagabhushana 	}
332256b2bdd1SGireesh Nagabhushana 
332356b2bdd1SGireesh Nagabhushana 	/* Add to the chain that we'll send up */
332456b2bdd1SGireesh Nagabhushana 	if (chain.head != NULL)
332556b2bdd1SGireesh Nagabhushana 		chain.tail->b_next = m;
332656b2bdd1SGireesh Nagabhushana 	else
332756b2bdd1SGireesh Nagabhushana 		chain.head = m;
332856b2bdd1SGireesh Nagabhushana 	chain.tail = m;
332956b2bdd1SGireesh Nagabhushana 
333056b2bdd1SGireesh Nagabhushana 	t4_mac_rx(rxq->port, rxq, chain.head);
333156b2bdd1SGireesh Nagabhushana 
33323dde7c95SVishal Kulkarni 	rxq->rxpkts++;
33333dde7c95SVishal Kulkarni 	rxq->rxbytes += be16_to_cpu(cpl->len);
333456b2bdd1SGireesh Nagabhushana 	return (0);
333556b2bdd1SGireesh Nagabhushana }
333656b2bdd1SGireesh Nagabhushana 
333756b2bdd1SGireesh Nagabhushana #define	FL_HW_IDX(idx)	((idx) >> 3)
333856b2bdd1SGireesh Nagabhushana 
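/*
 * Free-list credits are posted to the hardware in units of 8 software
 * descriptors; FL_HW_IDX converts a software index or count into those
 * units.  E.g. with fl->pending == 17 (and no pidx == cidx hold-back),
 * ndesc is 2, 16 buffers are posted, and fl->pending drops back to 1.
 */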
333956b2bdd1SGireesh Nagabhushana static inline void
334056b2bdd1SGireesh Nagabhushana ring_fl_db(struct adapter *sc, struct sge_fl *fl)
334156b2bdd1SGireesh Nagabhushana {
334256b2bdd1SGireesh Nagabhushana 	int desc_start, desc_last, ndesc;
33433dde7c95SVishal Kulkarni 	uint32_t v = sc->params.arch.sge_fl_db;
334456b2bdd1SGireesh Nagabhushana 
334556b2bdd1SGireesh Nagabhushana 	ndesc = FL_HW_IDX(fl->pending);
334656b2bdd1SGireesh Nagabhushana 
334756b2bdd1SGireesh Nagabhushana 	/* Hold back one credit if pidx = cidx */
334856b2bdd1SGireesh Nagabhushana 	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
334956b2bdd1SGireesh Nagabhushana 		ndesc--;
335056b2bdd1SGireesh Nagabhushana 
335156b2bdd1SGireesh Nagabhushana 	/*
335256b2bdd1SGireesh Nagabhushana 	 * There are chances of ndesc modified above (to avoid pidx = cidx).
335356b2bdd1SGireesh Nagabhushana 	 * If there is nothing to post, return.
335456b2bdd1SGireesh Nagabhushana 	 */
335556b2bdd1SGireesh Nagabhushana 	if (ndesc <= 0)
335656b2bdd1SGireesh Nagabhushana 		return;
335756b2bdd1SGireesh Nagabhushana 
335856b2bdd1SGireesh Nagabhushana 	desc_last = FL_HW_IDX(fl->pidx);
335956b2bdd1SGireesh Nagabhushana 
336056b2bdd1SGireesh Nagabhushana 	if (fl->pidx < fl->pending) {
336156b2bdd1SGireesh Nagabhushana 		/* There was a wrap */
336256b2bdd1SGireesh Nagabhushana 		desc_start = FL_HW_IDX(fl->pidx + fl->cap - fl->pending);
336356b2bdd1SGireesh Nagabhushana 
336456b2bdd1SGireesh Nagabhushana 		/* From desc_start to the end of list */
336556b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(fl->dhdl, desc_start * RX_FL_ESIZE, 0,
336656b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SYNC_FORDEV);
336756b2bdd1SGireesh Nagabhushana 
336856b2bdd1SGireesh Nagabhushana 		/* From start of list to the desc_last */
336956b2bdd1SGireesh Nagabhushana 		if (desc_last != 0)
337056b2bdd1SGireesh Nagabhushana 			(void) ddi_dma_sync(fl->dhdl, 0, desc_last *
337156b2bdd1SGireesh Nagabhushana 			    RX_FL_ESIZE, DDI_DMA_SYNC_FORDEV);
337256b2bdd1SGireesh Nagabhushana 	} else {
337356b2bdd1SGireesh Nagabhushana 		/* There was no wrap, sync from start_desc to last_desc */
337456b2bdd1SGireesh Nagabhushana 		desc_start = FL_HW_IDX(fl->pidx - fl->pending);
337556b2bdd1SGireesh Nagabhushana 		(void) ddi_dma_sync(fl->dhdl, desc_start * RX_FL_ESIZE,
337656b2bdd1SGireesh Nagabhushana 		    ndesc * RX_FL_ESIZE, DDI_DMA_SYNC_FORDEV);
337756b2bdd1SGireesh Nagabhushana 	}
337856b2bdd1SGireesh Nagabhushana 
3379de483253SVishal Kulkarni 	if (is_t4(sc->params.chip))
33803dde7c95SVishal Kulkarni 		v |= V_PIDX(ndesc);
3381de483253SVishal Kulkarni 	else
33823dde7c95SVishal Kulkarni 		v |= V_PIDX_T5(ndesc);
33833dde7c95SVishal Kulkarni 	v |= V_QID(fl->cntxt_id);
3384de483253SVishal Kulkarni 
338556b2bdd1SGireesh Nagabhushana 	membar_producer();
338656b2bdd1SGireesh Nagabhushana 
3387de483253SVishal Kulkarni 	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v);
338856b2bdd1SGireesh Nagabhushana 
338956b2bdd1SGireesh Nagabhushana 	/*
339056b2bdd1SGireesh Nagabhushana 	 * Update pending count:
339156b2bdd1SGireesh Nagabhushana 	 * Deduct the number of descriptors posted
339256b2bdd1SGireesh Nagabhushana 	 */
339356b2bdd1SGireesh Nagabhushana 	fl->pending -= ndesc * 8;
339456b2bdd1SGireesh Nagabhushana }
339556b2bdd1SGireesh Nagabhushana 
339606b05760SVishal Kulkarni static void
339706b05760SVishal Kulkarni tx_reclaim_task(void *arg)
339806b05760SVishal Kulkarni {
339906b05760SVishal Kulkarni 	struct sge_txq *txq = arg;
340006b05760SVishal Kulkarni 
340106b05760SVishal Kulkarni 	TXQ_LOCK(txq);
340206b05760SVishal Kulkarni 	reclaim_tx_descs(txq, txq->eq.qsize);
340306b05760SVishal Kulkarni 	TXQ_UNLOCK(txq);
340406b05760SVishal Kulkarni }
340506b05760SVishal Kulkarni 
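/*
 * An SGE egress update arrives when a tx queue that requested an
 * EQUEQ/EQUIQ notification has made progress.  Map the qid back to its
 * txq, poke the MAC layer so it may resume transmission, and push
 * descriptor reclamation onto the channel's taskq instead of doing it
 * in interrupt context.
 */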
340656b2bdd1SGireesh Nagabhushana /* ARGSUSED */
34073dde7c95SVishal Kulkarni static int
34083dde7c95SVishal Kulkarni handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
34093dde7c95SVishal Kulkarni     mblk_t *m)
34103dde7c95SVishal Kulkarni {
34113dde7c95SVishal Kulkarni 	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
34123dde7c95SVishal Kulkarni 	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
34133dde7c95SVishal Kulkarni 	struct adapter *sc = iq->adapter;
34143dde7c95SVishal Kulkarni 	struct sge *s = &sc->sge;
341506b05760SVishal Kulkarni 	struct sge_eq *eq;
34163dde7c95SVishal Kulkarni 	struct sge_txq *txq;
34173dde7c95SVishal Kulkarni 
34183dde7c95SVishal Kulkarni 	txq = (void *)s->eqmap[qid - s->eq_start];
341906b05760SVishal Kulkarni 	eq = &txq->eq;
34203dde7c95SVishal Kulkarni 	txq->qflush++;
34213dde7c95SVishal Kulkarni 	t4_mac_tx_update(txq->port, txq);
34223dde7c95SVishal Kulkarni 
342306b05760SVishal Kulkarni 	(void) ddi_taskq_dispatch(sc->tq[eq->tx_chan], tx_reclaim_task,
342406b05760SVishal Kulkarni 	    (void *)txq, DDI_NOSLEEP);
342506b05760SVishal Kulkarni 
34263dde7c95SVishal Kulkarni 	return (0);
34273dde7c95SVishal Kulkarni }
34283dde7c95SVishal Kulkarni 
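/*
 * Demultiplex a firmware message: FW_TYPE_RSSCPL/FW6_TYPE_RSSCPL
 * messages carry an embedded CPL whose opcode selects a cpl_handler;
 * everything else is routed by message type through fw_msg_handler.
 */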
342956b2bdd1SGireesh Nagabhushana static int
343056b2bdd1SGireesh Nagabhushana handle_fw_rpl(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
343156b2bdd1SGireesh Nagabhushana {
3432de483253SVishal Kulkarni 	struct adapter *sc = iq->adapter;
343356b2bdd1SGireesh Nagabhushana 	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
343456b2bdd1SGireesh Nagabhushana 
343556b2bdd1SGireesh Nagabhushana 	ASSERT(m == NULL);
343656b2bdd1SGireesh Nagabhushana 
3437de483253SVishal Kulkarni 	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
3438de483253SVishal Kulkarni 		const struct rss_header *rss2;
343956b2bdd1SGireesh Nagabhushana 
3440de483253SVishal Kulkarni 		rss2 = (const struct rss_header *)&cpl->data[0];
3441de483253SVishal Kulkarni 		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
3442de483253SVishal Kulkarni 	}
3443de483253SVishal Kulkarni 	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
344456b2bdd1SGireesh Nagabhushana }
344556b2bdd1SGireesh Nagabhushana 
344656b2bdd1SGireesh Nagabhushana int
344756b2bdd1SGireesh Nagabhushana t4_alloc_tx_maps(struct adapter *sc, struct tx_maps *txmaps, int count,
344856b2bdd1SGireesh Nagabhushana     int flags)
344956b2bdd1SGireesh Nagabhushana {
345056b2bdd1SGireesh Nagabhushana 	int i, rc;
345156b2bdd1SGireesh Nagabhushana 
345256b2bdd1SGireesh Nagabhushana 	txmaps->map_total = count;
345356b2bdd1SGireesh Nagabhushana 	txmaps->map_avail = txmaps->map_cidx = txmaps->map_pidx = 0;
345456b2bdd1SGireesh Nagabhushana 
345556b2bdd1SGireesh Nagabhushana 	txmaps->map = kmem_zalloc(sizeof (ddi_dma_handle_t) *
345656b2bdd1SGireesh Nagabhushana 	    txmaps->map_total, flags);
345756b2bdd1SGireesh Nagabhushana 
345856b2bdd1SGireesh Nagabhushana 	for (i = 0; i < count; i++) {
345956b2bdd1SGireesh Nagabhushana 		rc = ddi_dma_alloc_handle(sc->dip, &sc->sge.dma_attr_tx,
346056b2bdd1SGireesh Nagabhushana 		    DDI_DMA_SLEEP, 0, &txmaps->map[i]);
346156b2bdd1SGireesh Nagabhushana 		if (rc != DDI_SUCCESS) {
346256b2bdd1SGireesh Nagabhushana 			cxgb_printf(sc->dip, CE_WARN,
346356b2bdd1SGireesh Nagabhushana 			    "%s: failed to allocate DMA handle (%d)",
346456b2bdd1SGireesh Nagabhushana 			    __func__, rc);
346556b2bdd1SGireesh Nagabhushana 			return (rc == DDI_DMA_NORESOURCES ? ENOMEM : EINVAL);
346656b2bdd1SGireesh Nagabhushana 		}
346756b2bdd1SGireesh Nagabhushana 		txmaps->map_avail++;
346856b2bdd1SGireesh Nagabhushana 	}
346956b2bdd1SGireesh Nagabhushana 
347056b2bdd1SGireesh Nagabhushana 	return (0);
347156b2bdd1SGireesh Nagabhushana }
347256b2bdd1SGireesh Nagabhushana 
347356b2bdd1SGireesh Nagabhushana #define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
347456b2bdd1SGireesh Nagabhushana #define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
347556b2bdd1SGireesh Nagabhushana #define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
347656b2bdd1SGireesh Nagabhushana #define	KS_U_FROM(x, y)	kstatp->x.value.ul = (y)->x
347756b2bdd1SGireesh Nagabhushana #define	KS_C_SET(x, ...)	\
347856b2bdd1SGireesh Nagabhushana 			(void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
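/*
 * For example, KS_U_SET(nrxq, pi->nrxq) expands to
 * "kstatp->nrxq.value.ul = (pi->nrxq)", and KS_C_SET() snprintf()s its
 * arguments into the 16-byte value.c buffer of the named kstat.
 */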
347956b2bdd1SGireesh Nagabhushana 
348056b2bdd1SGireesh Nagabhushana /*
348156b2bdd1SGireesh Nagabhushana  * cxgbe:X:config
348256b2bdd1SGireesh Nagabhushana  */
348356b2bdd1SGireesh Nagabhushana struct cxgbe_port_config_kstats {
348456b2bdd1SGireesh Nagabhushana 	kstat_named_t idx;
348556b2bdd1SGireesh Nagabhushana 	kstat_named_t nrxq;
348656b2bdd1SGireesh Nagabhushana 	kstat_named_t ntxq;
348756b2bdd1SGireesh Nagabhushana 	kstat_named_t first_rxq;
348856b2bdd1SGireesh Nagabhushana 	kstat_named_t first_txq;
348956b2bdd1SGireesh Nagabhushana 	kstat_named_t controller;
349056b2bdd1SGireesh Nagabhushana 	kstat_named_t factory_mac_address;
349156b2bdd1SGireesh Nagabhushana };
349256b2bdd1SGireesh Nagabhushana 
349356b2bdd1SGireesh Nagabhushana /*
349456b2bdd1SGireesh Nagabhushana  * cxgbe:X:info
349556b2bdd1SGireesh Nagabhushana  */
349656b2bdd1SGireesh Nagabhushana struct cxgbe_port_info_kstats {
349756b2bdd1SGireesh Nagabhushana 	kstat_named_t transceiver;
349856b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_ovflow0;
349956b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_ovflow1;
350056b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_ovflow2;
350156b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_ovflow3;
350256b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_trunc0;
350356b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_trunc1;
350456b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_trunc2;
350556b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_trunc3;
350656b2bdd1SGireesh Nagabhushana 	kstat_named_t tx_pause;
350756b2bdd1SGireesh Nagabhushana 	kstat_named_t rx_pause;
350856b2bdd1SGireesh Nagabhushana };
350956b2bdd1SGireesh Nagabhushana 
351056b2bdd1SGireesh Nagabhushana static kstat_t *
351156b2bdd1SGireesh Nagabhushana setup_port_config_kstats(struct port_info *pi)
351256b2bdd1SGireesh Nagabhushana {
351356b2bdd1SGireesh Nagabhushana 	kstat_t *ksp;
351456b2bdd1SGireesh Nagabhushana 	struct cxgbe_port_config_kstats *kstatp;
351556b2bdd1SGireesh Nagabhushana 	int ndata;
351656b2bdd1SGireesh Nagabhushana 	dev_info_t *pdip = ddi_get_parent(pi->dip);
351756b2bdd1SGireesh Nagabhushana 	uint8_t *ma = &pi->hw_addr[0];
351856b2bdd1SGireesh Nagabhushana 
351956b2bdd1SGireesh Nagabhushana 	ndata = sizeof (struct cxgbe_port_config_kstats) /
352056b2bdd1SGireesh Nagabhushana 	    sizeof (kstat_named_t);
352156b2bdd1SGireesh Nagabhushana 
352256b2bdd1SGireesh Nagabhushana 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "config",
352356b2bdd1SGireesh Nagabhushana 	    "net", KSTAT_TYPE_NAMED, ndata, 0);
352456b2bdd1SGireesh Nagabhushana 	if (ksp == NULL) {
352556b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN, "failed to initialize kstats.");
352656b2bdd1SGireesh Nagabhushana 		return (NULL);
352756b2bdd1SGireesh Nagabhushana 	}
352856b2bdd1SGireesh Nagabhushana 
352956b2bdd1SGireesh Nagabhushana 	kstatp = (struct cxgbe_port_config_kstats *)ksp->ks_data;
353056b2bdd1SGireesh Nagabhushana 
353156b2bdd1SGireesh Nagabhushana 	KS_UINIT(idx);
353256b2bdd1SGireesh Nagabhushana 	KS_UINIT(nrxq);
353356b2bdd1SGireesh Nagabhushana 	KS_UINIT(ntxq);
353456b2bdd1SGireesh Nagabhushana 	KS_UINIT(first_rxq);
353556b2bdd1SGireesh Nagabhushana 	KS_UINIT(first_txq);
353656b2bdd1SGireesh Nagabhushana 	KS_CINIT(controller);
353756b2bdd1SGireesh Nagabhushana 	KS_CINIT(factory_mac_address);
353856b2bdd1SGireesh Nagabhushana 
353956b2bdd1SGireesh Nagabhushana 	KS_U_SET(idx, pi->port_id);
354056b2bdd1SGireesh Nagabhushana 	KS_U_SET(nrxq, pi->nrxq);
354156b2bdd1SGireesh Nagabhushana 	KS_U_SET(ntxq, pi->ntxq);
354256b2bdd1SGireesh Nagabhushana 	KS_U_SET(first_rxq, pi->first_rxq);
354356b2bdd1SGireesh Nagabhushana 	KS_U_SET(first_txq, pi->first_txq);
354456b2bdd1SGireesh Nagabhushana 	KS_C_SET(controller, "%s%d", ddi_driver_name(pdip),
354556b2bdd1SGireesh Nagabhushana 	    ddi_get_instance(pdip));
354656b2bdd1SGireesh Nagabhushana 	KS_C_SET(factory_mac_address, "%02X%02X%02X%02X%02X%02X",
354756b2bdd1SGireesh Nagabhushana 	    ma[0], ma[1], ma[2], ma[3], ma[4], ma[5]);
354856b2bdd1SGireesh Nagabhushana 
354956b2bdd1SGireesh Nagabhushana 	/* Do NOT set ksp->ks_update.  These kstats do not change. */
355056b2bdd1SGireesh Nagabhushana 
355156b2bdd1SGireesh Nagabhushana 	/* Install the kstat */
355256b2bdd1SGireesh Nagabhushana 	ksp->ks_private = (void *)pi;
355356b2bdd1SGireesh Nagabhushana 	kstat_install(ksp);
355456b2bdd1SGireesh Nagabhushana 
355556b2bdd1SGireesh Nagabhushana 	return (ksp);
355656b2bdd1SGireesh Nagabhushana }
355756b2bdd1SGireesh Nagabhushana 
355856b2bdd1SGireesh Nagabhushana static kstat_t *
355956b2bdd1SGireesh Nagabhushana setup_port_info_kstats(struct port_info *pi)
356056b2bdd1SGireesh Nagabhushana {
356156b2bdd1SGireesh Nagabhushana 	kstat_t *ksp;
356256b2bdd1SGireesh Nagabhushana 	struct cxgbe_port_info_kstats *kstatp;
356356b2bdd1SGireesh Nagabhushana 	int ndata;
356456b2bdd1SGireesh Nagabhushana 
356556b2bdd1SGireesh Nagabhushana 	ndata = sizeof (struct cxgbe_port_info_kstats) / sizeof (kstat_named_t);
356656b2bdd1SGireesh Nagabhushana 
356756b2bdd1SGireesh Nagabhushana 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "info",
356856b2bdd1SGireesh Nagabhushana 	    "net", KSTAT_TYPE_NAMED, ndata, 0);
356956b2bdd1SGireesh Nagabhushana 	if (ksp == NULL) {
357056b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN, "failed to initialize kstats.");
357156b2bdd1SGireesh Nagabhushana 		return (NULL);
357256b2bdd1SGireesh Nagabhushana 	}
357356b2bdd1SGireesh Nagabhushana 
357456b2bdd1SGireesh Nagabhushana 	kstatp = (struct cxgbe_port_info_kstats *)ksp->ks_data;
357556b2bdd1SGireesh Nagabhushana 
357656b2bdd1SGireesh Nagabhushana 	KS_CINIT(transceiver);
357756b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_ovflow0);
357856b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_ovflow1);
357956b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_ovflow2);
358056b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_ovflow3);
358156b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_trunc0);
358256b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_trunc1);
358356b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_trunc2);
358456b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_trunc3);
358556b2bdd1SGireesh Nagabhushana 	KS_UINIT(tx_pause);
358656b2bdd1SGireesh Nagabhushana 	KS_UINIT(rx_pause);
358756b2bdd1SGireesh Nagabhushana 
358856b2bdd1SGireesh Nagabhushana 	/* Install the kstat */
358956b2bdd1SGireesh Nagabhushana 	ksp->ks_update = update_port_info_kstats;
359056b2bdd1SGireesh Nagabhushana 	ksp->ks_private = (void *)pi;
359156b2bdd1SGireesh Nagabhushana 	kstat_install(ksp);
359256b2bdd1SGireesh Nagabhushana 
359356b2bdd1SGireesh Nagabhushana 	return (ksp);
359456b2bdd1SGireesh Nagabhushana }
359556b2bdd1SGireesh Nagabhushana 
359656b2bdd1SGireesh Nagabhushana static int
359756b2bdd1SGireesh Nagabhushana update_port_info_kstats(kstat_t *ksp, int rw)
359856b2bdd1SGireesh Nagabhushana {
359956b2bdd1SGireesh Nagabhushana 	struct cxgbe_port_info_kstats *kstatp =
360056b2bdd1SGireesh Nagabhushana 	    (struct cxgbe_port_info_kstats *)ksp->ks_data;
360156b2bdd1SGireesh Nagabhushana 	struct port_info *pi = ksp->ks_private;
360256b2bdd1SGireesh Nagabhushana 	static const char *mod_str[] = { NULL, "LR", "SR", "ER", "TWINAX",
360356b2bdd1SGireesh Nagabhushana 	    "active TWINAX", "LRM" };
360456b2bdd1SGireesh Nagabhushana 	uint32_t bgmap;
360556b2bdd1SGireesh Nagabhushana 
360656b2bdd1SGireesh Nagabhushana 	if (rw == KSTAT_WRITE)
360756b2bdd1SGireesh Nagabhushana 		return (0);
360856b2bdd1SGireesh Nagabhushana 
360956b2bdd1SGireesh Nagabhushana 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
361056b2bdd1SGireesh Nagabhushana 		KS_C_SET(transceiver, "unplugged");
361156b2bdd1SGireesh Nagabhushana 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
361256b2bdd1SGireesh Nagabhushana 		KS_C_SET(transceiver, "unknown");
361356b2bdd1SGireesh Nagabhushana 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
361456b2bdd1SGireesh Nagabhushana 		KS_C_SET(transceiver, "unsupported");
361556b2bdd1SGireesh Nagabhushana 	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
361656b2bdd1SGireesh Nagabhushana 		KS_C_SET(transceiver, "%s", mod_str[pi->mod_type]);
361756b2bdd1SGireesh Nagabhushana 	else
361856b2bdd1SGireesh Nagabhushana 		KS_C_SET(transceiver, "type %d", pi->mod_type);
361956b2bdd1SGireesh Nagabhushana 
362056b2bdd1SGireesh Nagabhushana #define	GET_STAT(name) t4_read_reg64(pi->adapter, \
362156b2bdd1SGireesh Nagabhushana 	    PORT_REG(pi->port_id, A_MPS_PORT_STAT_##name##_L))
362256b2bdd1SGireesh Nagabhushana #define	GET_STAT_COM(name) t4_read_reg64(pi->adapter, \
362356b2bdd1SGireesh Nagabhushana 	    A_MPS_STAT_##name##_L)
362456b2bdd1SGireesh Nagabhushana 
362556b2bdd1SGireesh Nagabhushana 	bgmap = G_NUMPORTS(t4_read_reg(pi->adapter, A_MPS_CMN_CTL));
362656b2bdd1SGireesh Nagabhushana 	if (bgmap == 0)
362756b2bdd1SGireesh Nagabhushana 		bgmap = (pi->port_id == 0) ? 0xf : 0;
362856b2bdd1SGireesh Nagabhushana 	else if (bgmap == 1)
362956b2bdd1SGireesh Nagabhushana 		bgmap = (pi->port_id < 2) ? (3 << (2 * pi->port_id)) : 0;
363056b2bdd1SGireesh Nagabhushana 	else
363156b2bdd1SGireesh Nagabhushana 		bgmap = 1;
363256b2bdd1SGireesh Nagabhushana 
363356b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_ovflow0, (bgmap & 1) ?
363456b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0);
363556b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_ovflow1, (bgmap & 2) ?
363656b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0);
363756b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_ovflow2, (bgmap & 4) ?
363856b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0);
363956b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_ovflow3, (bgmap & 8) ?
364056b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0);
364156b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_trunc0,  (bgmap & 1) ?
364256b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0);
364356b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_trunc1,  (bgmap & 2) ?
364456b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0);
364556b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_trunc2,  (bgmap & 4) ?
364656b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0);
364756b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_trunc3,  (bgmap & 8) ?
364856b2bdd1SGireesh Nagabhushana 	    GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0);
364956b2bdd1SGireesh Nagabhushana 
365056b2bdd1SGireesh Nagabhushana 	KS_U_SET(tx_pause, GET_STAT(TX_PORT_PAUSE));
365156b2bdd1SGireesh Nagabhushana 	KS_U_SET(rx_pause, GET_STAT(RX_PORT_PAUSE));
365256b2bdd1SGireesh Nagabhushana 
365356b2bdd1SGireesh Nagabhushana 	return (0);
365556b2bdd1SGireesh Nagabhushana }
365656b2bdd1SGireesh Nagabhushana 
365756b2bdd1SGireesh Nagabhushana /*
365856b2bdd1SGireesh Nagabhushana  * cxgbe:X:rxqY
365956b2bdd1SGireesh Nagabhushana  */
366056b2bdd1SGireesh Nagabhushana struct rxq_kstats {
366156b2bdd1SGireesh Nagabhushana 	kstat_named_t rxcsum;
36623dde7c95SVishal Kulkarni 	kstat_named_t rxpkts;
36633dde7c95SVishal Kulkarni 	kstat_named_t rxbytes;
366456b2bdd1SGireesh Nagabhushana 	kstat_named_t nomem;
366556b2bdd1SGireesh Nagabhushana };
366656b2bdd1SGireesh Nagabhushana 
366756b2bdd1SGireesh Nagabhushana static kstat_t *
366856b2bdd1SGireesh Nagabhushana setup_rxq_kstats(struct port_info *pi, struct sge_rxq *rxq, int idx)
366956b2bdd1SGireesh Nagabhushana {
367056b2bdd1SGireesh Nagabhushana 	struct kstat *ksp;
367156b2bdd1SGireesh Nagabhushana 	struct rxq_kstats *kstatp;
367256b2bdd1SGireesh Nagabhushana 	int ndata;
367356b2bdd1SGireesh Nagabhushana 	char str[16];
367456b2bdd1SGireesh Nagabhushana 
367556b2bdd1SGireesh Nagabhushana 	ndata = sizeof (struct rxq_kstats) / sizeof (kstat_named_t);
367656b2bdd1SGireesh Nagabhushana 	(void) snprintf(str, sizeof (str), "rxq%u", idx);
367756b2bdd1SGireesh Nagabhushana 
367856b2bdd1SGireesh Nagabhushana 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), str, "rxq",
367956b2bdd1SGireesh Nagabhushana 	    KSTAT_TYPE_NAMED, ndata, 0);
368056b2bdd1SGireesh Nagabhushana 	if (ksp == NULL) {
368156b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN,
368256b2bdd1SGireesh Nagabhushana 		    "%s: failed to initialize rxq kstats for queue %d.",
368356b2bdd1SGireesh Nagabhushana 		    __func__, idx);
368456b2bdd1SGireesh Nagabhushana 		return (NULL);
368556b2bdd1SGireesh Nagabhushana 	}
368656b2bdd1SGireesh Nagabhushana 
368756b2bdd1SGireesh Nagabhushana 	kstatp = (struct rxq_kstats *)ksp->ks_data;
368856b2bdd1SGireesh Nagabhushana 
368956b2bdd1SGireesh Nagabhushana 	KS_UINIT(rxcsum);
36903dde7c95SVishal Kulkarni 	KS_UINIT(rxpkts);
36913dde7c95SVishal Kulkarni 	KS_UINIT(rxbytes);
369256b2bdd1SGireesh Nagabhushana 	KS_UINIT(nomem);
369356b2bdd1SGireesh Nagabhushana 
369456b2bdd1SGireesh Nagabhushana 	ksp->ks_update = update_rxq_kstats;
369556b2bdd1SGireesh Nagabhushana 	ksp->ks_private = (void *)rxq;
369656b2bdd1SGireesh Nagabhushana 	kstat_install(ksp);
369756b2bdd1SGireesh Nagabhushana 
369856b2bdd1SGireesh Nagabhushana 	return (ksp);
369956b2bdd1SGireesh Nagabhushana }
370056b2bdd1SGireesh Nagabhushana 
370156b2bdd1SGireesh Nagabhushana static int
370256b2bdd1SGireesh Nagabhushana update_rxq_kstats(kstat_t *ksp, int rw)
370356b2bdd1SGireesh Nagabhushana {
370456b2bdd1SGireesh Nagabhushana 	struct rxq_kstats *kstatp = (struct rxq_kstats *)ksp->ks_data;
370556b2bdd1SGireesh Nagabhushana 	struct sge_rxq *rxq = ksp->ks_private;
370656b2bdd1SGireesh Nagabhushana 
370756b2bdd1SGireesh Nagabhushana 	if (rw == KSTAT_WRITE)
370856b2bdd1SGireesh Nagabhushana 		return (0);
370956b2bdd1SGireesh Nagabhushana 
371056b2bdd1SGireesh Nagabhushana 	KS_U_FROM(rxcsum, rxq);
37113dde7c95SVishal Kulkarni 	KS_U_FROM(rxpkts, rxq);
37123dde7c95SVishal Kulkarni 	KS_U_FROM(rxbytes, rxq);
371356b2bdd1SGireesh Nagabhushana 	KS_U_FROM(nomem, rxq);
371456b2bdd1SGireesh Nagabhushana 
371556b2bdd1SGireesh Nagabhushana 	return (0);
371656b2bdd1SGireesh Nagabhushana }
371756b2bdd1SGireesh Nagabhushana 
371856b2bdd1SGireesh Nagabhushana /*
371956b2bdd1SGireesh Nagabhushana  * cxgbe:X:txqY
372056b2bdd1SGireesh Nagabhushana  */
372156b2bdd1SGireesh Nagabhushana struct txq_kstats {
372256b2bdd1SGireesh Nagabhushana 	kstat_named_t txcsum;
372356b2bdd1SGireesh Nagabhushana 	kstat_named_t tso_wrs;
372456b2bdd1SGireesh Nagabhushana 	kstat_named_t imm_wrs;
372556b2bdd1SGireesh Nagabhushana 	kstat_named_t sgl_wrs;
372656b2bdd1SGireesh Nagabhushana 	kstat_named_t txpkt_wrs;
372756b2bdd1SGireesh Nagabhushana 	kstat_named_t txpkts_wrs;
372856b2bdd1SGireesh Nagabhushana 	kstat_named_t txpkts_pkts;
372956b2bdd1SGireesh Nagabhushana 	kstat_named_t txb_used;
373056b2bdd1SGireesh Nagabhushana 	kstat_named_t hdl_used;
373156b2bdd1SGireesh Nagabhushana 	kstat_named_t txb_full;
373256b2bdd1SGireesh Nagabhushana 	kstat_named_t dma_hdl_failed;
373356b2bdd1SGireesh Nagabhushana 	kstat_named_t dma_map_failed;
373456b2bdd1SGireesh Nagabhushana 	kstat_named_t qfull;
373556b2bdd1SGireesh Nagabhushana 	kstat_named_t qflush;
373656b2bdd1SGireesh Nagabhushana 	kstat_named_t pullup_early;
373756b2bdd1SGireesh Nagabhushana 	kstat_named_t pullup_late;
373856b2bdd1SGireesh Nagabhushana 	kstat_named_t pullup_failed;
373956b2bdd1SGireesh Nagabhushana };
374056b2bdd1SGireesh Nagabhushana 
374156b2bdd1SGireesh Nagabhushana static kstat_t *
374256b2bdd1SGireesh Nagabhushana setup_txq_kstats(struct port_info *pi, struct sge_txq *txq, int idx)
374356b2bdd1SGireesh Nagabhushana {
374456b2bdd1SGireesh Nagabhushana 	struct kstat *ksp;
374556b2bdd1SGireesh Nagabhushana 	struct txq_kstats *kstatp;
374656b2bdd1SGireesh Nagabhushana 	int ndata;
374756b2bdd1SGireesh Nagabhushana 	char str[16];
374856b2bdd1SGireesh Nagabhushana 
374956b2bdd1SGireesh Nagabhushana 	ndata = sizeof (struct txq_kstats) / sizeof (kstat_named_t);
375056b2bdd1SGireesh Nagabhushana 	(void) snprintf(str, sizeof (str), "txq%u", idx);
375156b2bdd1SGireesh Nagabhushana 
375256b2bdd1SGireesh Nagabhushana 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), str, "txq",
375356b2bdd1SGireesh Nagabhushana 	    KSTAT_TYPE_NAMED, ndata, 0);
375456b2bdd1SGireesh Nagabhushana 	if (ksp == NULL) {
375556b2bdd1SGireesh Nagabhushana 		cxgb_printf(pi->dip, CE_WARN,
375656b2bdd1SGireesh Nagabhushana 		    "%s: failed to initialize txq kstats for queue %d.",
375756b2bdd1SGireesh Nagabhushana 		    __func__, idx);
375856b2bdd1SGireesh Nagabhushana 		return (NULL);
375956b2bdd1SGireesh Nagabhushana 	}
376056b2bdd1SGireesh Nagabhushana 
376156b2bdd1SGireesh Nagabhushana 	kstatp = (struct txq_kstats *)ksp->ks_data;
376256b2bdd1SGireesh Nagabhushana 
376356b2bdd1SGireesh Nagabhushana 	KS_UINIT(txcsum);
376456b2bdd1SGireesh Nagabhushana 	KS_UINIT(tso_wrs);
376556b2bdd1SGireesh Nagabhushana 	KS_UINIT(imm_wrs);
376656b2bdd1SGireesh Nagabhushana 	KS_UINIT(sgl_wrs);
376756b2bdd1SGireesh Nagabhushana 	KS_UINIT(txpkt_wrs);
376856b2bdd1SGireesh Nagabhushana 	KS_UINIT(txpkts_wrs);
376956b2bdd1SGireesh Nagabhushana 	KS_UINIT(txpkts_pkts);
377056b2bdd1SGireesh Nagabhushana 	KS_UINIT(txb_used);
377156b2bdd1SGireesh Nagabhushana 	KS_UINIT(hdl_used);
377256b2bdd1SGireesh Nagabhushana 	KS_UINIT(txb_full);
377356b2bdd1SGireesh Nagabhushana 	KS_UINIT(dma_hdl_failed);
377456b2bdd1SGireesh Nagabhushana 	KS_UINIT(dma_map_failed);
377556b2bdd1SGireesh Nagabhushana 	KS_UINIT(qfull);
377656b2bdd1SGireesh Nagabhushana 	KS_UINIT(qflush);
377756b2bdd1SGireesh Nagabhushana 	KS_UINIT(pullup_early);
377856b2bdd1SGireesh Nagabhushana 	KS_UINIT(pullup_late);
377956b2bdd1SGireesh Nagabhushana 	KS_UINIT(pullup_failed);
378056b2bdd1SGireesh Nagabhushana 
378156b2bdd1SGireesh Nagabhushana 	ksp->ks_update = update_txq_kstats;
378256b2bdd1SGireesh Nagabhushana 	ksp->ks_private = (void *)txq;
378356b2bdd1SGireesh Nagabhushana 	kstat_install(ksp);
378456b2bdd1SGireesh Nagabhushana 
378556b2bdd1SGireesh Nagabhushana 	return (ksp);
378656b2bdd1SGireesh Nagabhushana }
378756b2bdd1SGireesh Nagabhushana 
378856b2bdd1SGireesh Nagabhushana static int
378956b2bdd1SGireesh Nagabhushana update_txq_kstats(kstat_t *ksp, int rw)
379056b2bdd1SGireesh Nagabhushana {
379156b2bdd1SGireesh Nagabhushana 	struct txq_kstats *kstatp = (struct txq_kstats *)ksp->ks_data;
379256b2bdd1SGireesh Nagabhushana 	struct sge_txq *txq = ksp->ks_private;
379356b2bdd1SGireesh Nagabhushana 
379456b2bdd1SGireesh Nagabhushana 	if (rw == KSTAT_WRITE)
379556b2bdd1SGireesh Nagabhushana 		return (0);
379656b2bdd1SGireesh Nagabhushana 
379756b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txcsum, txq);
379856b2bdd1SGireesh Nagabhushana 	KS_U_FROM(tso_wrs, txq);
379956b2bdd1SGireesh Nagabhushana 	KS_U_FROM(imm_wrs, txq);
380056b2bdd1SGireesh Nagabhushana 	KS_U_FROM(sgl_wrs, txq);
380156b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txpkt_wrs, txq);
380256b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txpkts_wrs, txq);
380356b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txpkts_pkts, txq);
380456b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txb_used, txq);
380556b2bdd1SGireesh Nagabhushana 	KS_U_FROM(hdl_used, txq);
380656b2bdd1SGireesh Nagabhushana 	KS_U_FROM(txb_full, txq);
380756b2bdd1SGireesh Nagabhushana 	KS_U_FROM(dma_hdl_failed, txq);
380856b2bdd1SGireesh Nagabhushana 	KS_U_FROM(dma_map_failed, txq);
380956b2bdd1SGireesh Nagabhushana 	KS_U_FROM(qfull, txq);
381056b2bdd1SGireesh Nagabhushana 	KS_U_FROM(qflush, txq);
381156b2bdd1SGireesh Nagabhushana 	KS_U_FROM(pullup_early, txq);
381256b2bdd1SGireesh Nagabhushana 	KS_U_FROM(pullup_late, txq);
381356b2bdd1SGireesh Nagabhushana 	KS_U_FROM(pullup_failed, txq);
381456b2bdd1SGireesh Nagabhushana 
381556b2bdd1SGireesh Nagabhushana 	return (0);
381656b2bdd1SGireesh Nagabhushana }