1bafec742SSukumar Swaminathan /*
2bafec742SSukumar Swaminathan  * CDDL HEADER START
3bafec742SSukumar Swaminathan  *
4bafec742SSukumar Swaminathan  * The contents of this file are subject to the terms of the
5bafec742SSukumar Swaminathan  * Common Development and Distribution License (the "License").
6bafec742SSukumar Swaminathan  * You may not use this file except in compliance with the License.
7bafec742SSukumar Swaminathan  *
8bafec742SSukumar Swaminathan  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9bafec742SSukumar Swaminathan  * or http://www.opensolaris.org/os/licensing.
10bafec742SSukumar Swaminathan  * See the License for the specific language governing permissions
11bafec742SSukumar Swaminathan  * and limitations under the License.
12bafec742SSukumar Swaminathan  *
13bafec742SSukumar Swaminathan  * When distributing Covered Code, include this CDDL HEADER in each
14bafec742SSukumar Swaminathan  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15bafec742SSukumar Swaminathan  * If applicable, add the following below this CDDL HEADER, with the
16bafec742SSukumar Swaminathan  * fields enclosed by brackets "[]" replaced with your own identifying
17bafec742SSukumar Swaminathan  * information: Portions Copyright [yyyy] [name of copyright owner]
18bafec742SSukumar Swaminathan  *
19bafec742SSukumar Swaminathan  * CDDL HEADER END
20bafec742SSukumar Swaminathan  */
21bafec742SSukumar Swaminathan 
22bafec742SSukumar Swaminathan /*
23accf27a5SSukumar Swaminathan  * Copyright 2010 QLogic Corporation. All rights reserved.
24bafec742SSukumar Swaminathan  */
25bafec742SSukumar Swaminathan 
2615c07adcSJohn Levon /*
2715c07adcSJohn Levon  * Copyright (c) 2018, Joyent, Inc.
2815c07adcSJohn Levon  */
2915c07adcSJohn Levon 
30bafec742SSukumar Swaminathan #include <qlge.h>
31bafec742SSukumar Swaminathan #include <sys/atomic.h>
32bafec742SSukumar Swaminathan #include <sys/strsubr.h>
33bafec742SSukumar Swaminathan #include <sys/pattr.h>
34bafec742SSukumar Swaminathan #include <netinet/in.h>
35bafec742SSukumar Swaminathan #include <netinet/ip.h>
36bafec742SSukumar Swaminathan #include <netinet/ip6.h>
37bafec742SSukumar Swaminathan #include <netinet/tcp.h>
38bafec742SSukumar Swaminathan #include <netinet/udp.h>
39bafec742SSukumar Swaminathan #include <inet/ip.h>
40bafec742SSukumar Swaminathan 
41bafec742SSukumar Swaminathan 
42bafec742SSukumar Swaminathan 
43bafec742SSukumar Swaminathan /*
44bafec742SSukumar Swaminathan  * Local variables
45bafec742SSukumar Swaminathan  */
46bafec742SSukumar Swaminathan static struct ether_addr ql_ether_broadcast_addr =
47bafec742SSukumar Swaminathan 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
48accf27a5SSukumar Swaminathan static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
49bafec742SSukumar Swaminathan 
50bafec742SSukumar Swaminathan /*
51bafec742SSukumar Swaminathan  * Local function prototypes
52bafec742SSukumar Swaminathan  */
53accf27a5SSukumar Swaminathan static void ql_free_resources(qlge_t *);
54bafec742SSukumar Swaminathan static void ql_fini_kstats(qlge_t *);
55bafec742SSukumar Swaminathan static uint32_t ql_get_link_state(qlge_t *);
56bafec742SSukumar Swaminathan static void ql_read_conf(qlge_t *);
57bafec742SSukumar Swaminathan static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
58bafec742SSukumar Swaminathan     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
59bafec742SSukumar Swaminathan     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
60accf27a5SSukumar Swaminathan static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
61accf27a5SSukumar Swaminathan     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
62accf27a5SSukumar Swaminathan     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
63bafec742SSukumar Swaminathan static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
64bafec742SSukumar Swaminathan static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
65bafec742SSukumar Swaminathan static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
66bafec742SSukumar Swaminathan static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
67bafec742SSukumar Swaminathan static int ql_bringdown_adapter(qlge_t *);
68bafec742SSukumar Swaminathan static int ql_bringup_adapter(qlge_t *);
69bafec742SSukumar Swaminathan static int ql_asic_reset(qlge_t *);
70bafec742SSukumar Swaminathan static void ql_wake_mpi_reset_soft_intr(qlge_t *);
71bafec742SSukumar Swaminathan static void ql_stop_timer(qlge_t *qlge);
72accf27a5SSukumar Swaminathan static void ql_fm_fini(qlge_t *qlge);
73accf27a5SSukumar Swaminathan int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
74bafec742SSukumar Swaminathan 
75bafec742SSukumar Swaminathan /*
76bafec742SSukumar Swaminathan  * TX dma maping handlers allow multiple sscatter-gather lists
77bafec742SSukumar Swaminathan  */
78bafec742SSukumar Swaminathan ddi_dma_attr_t  tx_mapping_dma_attr = {
79bafec742SSukumar Swaminathan 	DMA_ATTR_V0,			/* dma_attr_version */
80bafec742SSukumar Swaminathan 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
81bafec742SSukumar Swaminathan 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
82bafec742SSukumar Swaminathan 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
83bafec742SSukumar Swaminathan 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
84bafec742SSukumar Swaminathan 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
85bafec742SSukumar Swaminathan 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
86bafec742SSukumar Swaminathan 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
87bafec742SSukumar Swaminathan 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
88bafec742SSukumar Swaminathan 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
89bafec742SSukumar Swaminathan 	QL_DMA_GRANULARITY,		/* granularity of device */
90accf27a5SSukumar Swaminathan 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
91bafec742SSukumar Swaminathan };
92bafec742SSukumar Swaminathan 
93bafec742SSukumar Swaminathan /*
94bafec742SSukumar Swaminathan  * Receive buffers and Request/Response queues do not allow scatter-gather lists
95bafec742SSukumar Swaminathan  */
96bafec742SSukumar Swaminathan ddi_dma_attr_t  dma_attr = {
97bafec742SSukumar Swaminathan 	DMA_ATTR_V0,			/* dma_attr_version */
98bafec742SSukumar Swaminathan 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
99bafec742SSukumar Swaminathan 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
100bafec742SSukumar Swaminathan 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
101bafec742SSukumar Swaminathan 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
102bafec742SSukumar Swaminathan 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
103bafec742SSukumar Swaminathan 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
104bafec742SSukumar Swaminathan 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
105bafec742SSukumar Swaminathan 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
106bafec742SSukumar Swaminathan 	1,				/* s/g list length, i.e no sg list */
107bafec742SSukumar Swaminathan 	QL_DMA_GRANULARITY,		/* granularity of device */
108bafec742SSukumar Swaminathan 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
109bafec742SSukumar Swaminathan };
110accf27a5SSukumar Swaminathan /*
111accf27a5SSukumar Swaminathan  * Receive buffers do not allow scatter-gather lists
112accf27a5SSukumar Swaminathan  */
113accf27a5SSukumar Swaminathan ddi_dma_attr_t  dma_attr_rbuf = {
114accf27a5SSukumar Swaminathan 	DMA_ATTR_V0,			/* dma_attr_version */
115accf27a5SSukumar Swaminathan 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
116accf27a5SSukumar Swaminathan 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
117accf27a5SSukumar Swaminathan 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
118accf27a5SSukumar Swaminathan 	0x1,				/* DMA address alignment, default - 8 */
119accf27a5SSukumar Swaminathan 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
120accf27a5SSukumar Swaminathan 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
121accf27a5SSukumar Swaminathan 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
122accf27a5SSukumar Swaminathan 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
123accf27a5SSukumar Swaminathan 	1,				/* s/g list length, i.e no sg list */
124accf27a5SSukumar Swaminathan 	QL_DMA_GRANULARITY,		/* granularity of device */
125accf27a5SSukumar Swaminathan 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
126accf27a5SSukumar Swaminathan };
127bafec742SSukumar Swaminathan /*
128bafec742SSukumar Swaminathan  * DMA access attribute structure.
129bafec742SSukumar Swaminathan  */
130bafec742SSukumar Swaminathan /* device register access from host */
131bafec742SSukumar Swaminathan ddi_device_acc_attr_t ql_dev_acc_attr = {
132bafec742SSukumar Swaminathan 	DDI_DEVICE_ATTR_V0,
133bafec742SSukumar Swaminathan 	DDI_STRUCTURE_LE_ACC,
134bafec742SSukumar Swaminathan 	DDI_STRICTORDER_ACC
135bafec742SSukumar Swaminathan };
136bafec742SSukumar Swaminathan 
137bafec742SSukumar Swaminathan /* host ring descriptors */
138bafec742SSukumar Swaminathan ddi_device_acc_attr_t ql_desc_acc_attr = {
139bafec742SSukumar Swaminathan 	DDI_DEVICE_ATTR_V0,
140bafec742SSukumar Swaminathan 	DDI_NEVERSWAP_ACC,
141bafec742SSukumar Swaminathan 	DDI_STRICTORDER_ACC
142bafec742SSukumar Swaminathan };
143bafec742SSukumar Swaminathan 
144bafec742SSukumar Swaminathan /* host ring buffer */
145bafec742SSukumar Swaminathan ddi_device_acc_attr_t ql_buf_acc_attr = {
146bafec742SSukumar Swaminathan 	DDI_DEVICE_ATTR_V0,
147bafec742SSukumar Swaminathan 	DDI_NEVERSWAP_ACC,
148bafec742SSukumar Swaminathan 	DDI_STRICTORDER_ACC
149bafec742SSukumar Swaminathan };
150bafec742SSukumar Swaminathan 
151bafec742SSukumar Swaminathan /*
152bafec742SSukumar Swaminathan  * Hash key table for Receive Side Scaling (RSS) support
153bafec742SSukumar Swaminathan  */
154bafec742SSukumar Swaminathan const uint8_t key_data[] = {
155bafec742SSukumar Swaminathan 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
156bafec742SSukumar Swaminathan 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
157bafec742SSukumar Swaminathan 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
158bafec742SSukumar Swaminathan 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
159bafec742SSukumar Swaminathan 
160bafec742SSukumar Swaminathan /*
161bafec742SSukumar Swaminathan  * Shadow Registers:
162bafec742SSukumar Swaminathan  * Outbound queues have a consumer index that is maintained by the chip.
163bafec742SSukumar Swaminathan  * Inbound queues have a producer index that is maintained by the chip.
164bafec742SSukumar Swaminathan  * For lower overhead, these registers are "shadowed" to host memory
165bafec742SSukumar Swaminathan  * which allows the device driver to track the queue progress without
166bafec742SSukumar Swaminathan  * PCI reads. When an entry is placed on an inbound queue, the chip will
167bafec742SSukumar Swaminathan  * update the relevant index register and then copy the value to the
168bafec742SSukumar Swaminathan  * shadow register in host memory.
169accf27a5SSukumar Swaminathan  * Currently, ql_read_sh_reg only read Inbound queues'producer index.
170bafec742SSukumar Swaminathan  */
171bafec742SSukumar Swaminathan 
172bafec742SSukumar Swaminathan static inline unsigned int
ql_read_sh_reg(qlge_t * qlge,struct rx_ring * rx_ring)173accf27a5SSukumar Swaminathan ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
174bafec742SSukumar Swaminathan {
175accf27a5SSukumar Swaminathan 	uint32_t rtn;
176accf27a5SSukumar Swaminathan 
177accf27a5SSukumar Swaminathan 	/* re-synchronize shadow prod index dma buffer before reading */
178accf27a5SSukumar Swaminathan 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
179accf27a5SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg_offset,
180accf27a5SSukumar Swaminathan 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
181accf27a5SSukumar Swaminathan 
182accf27a5SSukumar Swaminathan 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
183accf27a5SSukumar Swaminathan 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
184accf27a5SSukumar Swaminathan 
185accf27a5SSukumar Swaminathan 	return (rtn);
186bafec742SSukumar Swaminathan }
187bafec742SSukumar Swaminathan 
188bafec742SSukumar Swaminathan /*
189bafec742SSukumar Swaminathan  * Read 32 bit atomically
190bafec742SSukumar Swaminathan  */
191bafec742SSukumar Swaminathan uint32_t
ql_atomic_read_32(volatile uint32_t * target)192bafec742SSukumar Swaminathan ql_atomic_read_32(volatile uint32_t *target)
193bafec742SSukumar Swaminathan {
194bafec742SSukumar Swaminathan 	/*
195bafec742SSukumar Swaminathan 	 * atomic_add_32_nv returns the new value after the add,
196bafec742SSukumar Swaminathan 	 * we are adding 0 so we should get the original value
197bafec742SSukumar Swaminathan 	 */
198bafec742SSukumar Swaminathan 	return (atomic_add_32_nv(target, 0));
199bafec742SSukumar Swaminathan }
200bafec742SSukumar Swaminathan 
201bafec742SSukumar Swaminathan /*
202bafec742SSukumar Swaminathan  * Set 32 bit atomically
203bafec742SSukumar Swaminathan  */
204bafec742SSukumar Swaminathan void
ql_atomic_set_32(volatile uint32_t * target,uint32_t newval)205bafec742SSukumar Swaminathan ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
206bafec742SSukumar Swaminathan {
207bafec742SSukumar Swaminathan 	(void) atomic_swap_32(target, newval);
208bafec742SSukumar Swaminathan }
209bafec742SSukumar Swaminathan 
210bafec742SSukumar Swaminathan 
211bafec742SSukumar Swaminathan /*
212bafec742SSukumar Swaminathan  * Setup device PCI configuration registers.
213bafec742SSukumar Swaminathan  * Kernel context.
214bafec742SSukumar Swaminathan  */
215bafec742SSukumar Swaminathan static void
ql_pci_config(qlge_t * qlge)216bafec742SSukumar Swaminathan ql_pci_config(qlge_t *qlge)
217bafec742SSukumar Swaminathan {
218bafec742SSukumar Swaminathan 	uint16_t w;
219bafec742SSukumar Swaminathan 
220bafec742SSukumar Swaminathan 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
221bafec742SSukumar Swaminathan 	    PCI_CONF_VENID);
222bafec742SSukumar Swaminathan 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
223bafec742SSukumar Swaminathan 	    PCI_CONF_DEVID);
224bafec742SSukumar Swaminathan 
225bafec742SSukumar Swaminathan 	/*
226bafec742SSukumar Swaminathan 	 * we want to respect framework's setting of PCI
227bafec742SSukumar Swaminathan 	 * configuration space command register and also
228bafec742SSukumar Swaminathan 	 * want to make sure that all bits of interest to us
229bafec742SSukumar Swaminathan 	 * are properly set in PCI Command register(0x04).
230bafec742SSukumar Swaminathan 	 * PCI_COMM_IO		0x1	 I/O access enable
231bafec742SSukumar Swaminathan 	 * PCI_COMM_MAE		0x2	 Memory access enable
232bafec742SSukumar Swaminathan 	 * PCI_COMM_ME		0x4	 bus master enable
233bafec742SSukumar Swaminathan 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
234bafec742SSukumar Swaminathan 	 */
235bafec742SSukumar Swaminathan 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
236bafec742SSukumar Swaminathan 	w = (uint16_t)(w & (~PCI_COMM_IO));
237bafec742SSukumar Swaminathan 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
238bafec742SSukumar Swaminathan 	    /* PCI_COMM_MEMWR_INVAL | */
239bafec742SSukumar Swaminathan 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
240bafec742SSukumar Swaminathan 
241bafec742SSukumar Swaminathan 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
242bafec742SSukumar Swaminathan 
243accf27a5SSukumar Swaminathan 	w = pci_config_get16(qlge->pci_handle, 0x54);
244accf27a5SSukumar Swaminathan 	w = (uint16_t)(w & (~0x7000));
245accf27a5SSukumar Swaminathan 	w = (uint16_t)(w | 0x5000);
246accf27a5SSukumar Swaminathan 	pci_config_put16(qlge->pci_handle, 0x54, w);
247accf27a5SSukumar Swaminathan 
248bafec742SSukumar Swaminathan 	ql_dump_pci_config(qlge);
249bafec742SSukumar Swaminathan }
250bafec742SSukumar Swaminathan 
251bafec742SSukumar Swaminathan /*
252bafec742SSukumar Swaminathan  * This routine parforms the neccessary steps to set GLD mac information
253bafec742SSukumar Swaminathan  * such as Function number, xgmac mask and shift bits
254bafec742SSukumar Swaminathan  */
255bafec742SSukumar Swaminathan static int
ql_set_mac_info(qlge_t * qlge)256bafec742SSukumar Swaminathan ql_set_mac_info(qlge_t *qlge)
257bafec742SSukumar Swaminathan {
258bafec742SSukumar Swaminathan 	uint32_t value;
259accf27a5SSukumar Swaminathan 	int rval = DDI_FAILURE;
260bafec742SSukumar Swaminathan 	uint32_t fn0_net, fn1_net;
261bafec742SSukumar Swaminathan 
262bafec742SSukumar Swaminathan 	/* set default value */
263bafec742SSukumar Swaminathan 	qlge->fn0_net = FN0_NET;
264bafec742SSukumar Swaminathan 	qlge->fn1_net = FN1_NET;
265bafec742SSukumar Swaminathan 
266bafec742SSukumar Swaminathan 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
267bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
268bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
269accf27a5SSukumar Swaminathan 		goto exit;
270bafec742SSukumar Swaminathan 	} else {
271bafec742SSukumar Swaminathan 		fn0_net = (value >> 1) & 0x07;
272bafec742SSukumar Swaminathan 		fn1_net = (value >> 5) & 0x07;
273bafec742SSukumar Swaminathan 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
274bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
275bafec742SSukumar Swaminathan 			    "nic0 function number %d,"
276bafec742SSukumar Swaminathan 			    "nic1 function number %d "
277bafec742SSukumar Swaminathan 			    "use default\n",
278bafec742SSukumar Swaminathan 			    __func__, qlge->instance, value, fn0_net, fn1_net);
279accf27a5SSukumar Swaminathan 			goto exit;
280bafec742SSukumar Swaminathan 		} else {
281bafec742SSukumar Swaminathan 			qlge->fn0_net = fn0_net;
282bafec742SSukumar Swaminathan 			qlge->fn1_net = fn1_net;
283bafec742SSukumar Swaminathan 		}
284bafec742SSukumar Swaminathan 	}
285bafec742SSukumar Swaminathan 
286bafec742SSukumar Swaminathan 	/* Get the function number that the driver is associated with */
287bafec742SSukumar Swaminathan 	value = ql_read_reg(qlge, REG_STATUS);
288bafec742SSukumar Swaminathan 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
289bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
290bafec742SSukumar Swaminathan 	    value, qlge->func_number));
291bafec742SSukumar Swaminathan 
292bafec742SSukumar Swaminathan 	/* The driver is loaded on a non-NIC function? */
293bafec742SSukumar Swaminathan 	if ((qlge->func_number != qlge->fn0_net) &&
294bafec742SSukumar Swaminathan 	    (qlge->func_number != qlge->fn1_net)) {
295bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
296bafec742SSukumar Swaminathan 		    "Invalid function number = 0x%x\n", qlge->func_number);
297accf27a5SSukumar Swaminathan 		goto exit;
298bafec742SSukumar Swaminathan 	}
299bafec742SSukumar Swaminathan 	/* network port 0? */
300bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
301bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
302bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
303bafec742SSukumar Swaminathan 	} else {
304bafec742SSukumar Swaminathan 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
305bafec742SSukumar Swaminathan 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
306bafec742SSukumar Swaminathan 	}
307accf27a5SSukumar Swaminathan 	rval = DDI_SUCCESS;
308accf27a5SSukumar Swaminathan exit:
309bafec742SSukumar Swaminathan 	return (rval);
310bafec742SSukumar Swaminathan 
311bafec742SSukumar Swaminathan }
312bafec742SSukumar Swaminathan 
313bafec742SSukumar Swaminathan /*
314bafec742SSukumar Swaminathan  * write to doorbell register
315bafec742SSukumar Swaminathan  */
316bafec742SSukumar Swaminathan void
ql_write_doorbell_reg(qlge_t * qlge,uint32_t * addr,uint32_t data)317bafec742SSukumar Swaminathan ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
318bafec742SSukumar Swaminathan {
319bafec742SSukumar Swaminathan 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
320bafec742SSukumar Swaminathan }
321bafec742SSukumar Swaminathan 
322bafec742SSukumar Swaminathan /*
323bafec742SSukumar Swaminathan  * read from doorbell register
324bafec742SSukumar Swaminathan  */
325bafec742SSukumar Swaminathan uint32_t
ql_read_doorbell_reg(qlge_t * qlge,uint32_t * addr)326bafec742SSukumar Swaminathan ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
327bafec742SSukumar Swaminathan {
328bafec742SSukumar Swaminathan 	uint32_t ret;
329bafec742SSukumar Swaminathan 
330bafec742SSukumar Swaminathan 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
331bafec742SSukumar Swaminathan 
332bafec742SSukumar Swaminathan 	return	(ret);
333bafec742SSukumar Swaminathan }
334bafec742SSukumar Swaminathan 
335bafec742SSukumar Swaminathan /*
336bafec742SSukumar Swaminathan  * This function waits for a specific bit to come ready
337bafec742SSukumar Swaminathan  * in a given register.  It is used mostly by the initialize
338bafec742SSukumar Swaminathan  * process, but is also used in kernel thread API such as
339bafec742SSukumar Swaminathan  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
340bafec742SSukumar Swaminathan  */
341bafec742SSukumar Swaminathan static int
ql_wait_reg_rdy(qlge_t * qlge,uint32_t reg,uint32_t bit,uint32_t err_bit)342bafec742SSukumar Swaminathan ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
343bafec742SSukumar Swaminathan {
344bafec742SSukumar Swaminathan 	uint32_t temp;
345bafec742SSukumar Swaminathan 	int count = UDELAY_COUNT;
346bafec742SSukumar Swaminathan 
347bafec742SSukumar Swaminathan 	while (count) {
348bafec742SSukumar Swaminathan 		temp = ql_read_reg(qlge, reg);
349bafec742SSukumar Swaminathan 
350bafec742SSukumar Swaminathan 		/* check for errors */
351bafec742SSukumar Swaminathan 		if ((temp & err_bit) != 0) {
352bafec742SSukumar Swaminathan 			break;
353bafec742SSukumar Swaminathan 		} else if ((temp & bit) != 0)
354bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
355bafec742SSukumar Swaminathan 		qlge_delay(UDELAY_DELAY);
356bafec742SSukumar Swaminathan 		count--;
357bafec742SSukumar Swaminathan 	}
358bafec742SSukumar Swaminathan 	cmn_err(CE_WARN,
359bafec742SSukumar Swaminathan 	    "Waiting for reg %x to come ready failed.", reg);
360accf27a5SSukumar Swaminathan 	if (qlge->fm_enable) {
361accf27a5SSukumar Swaminathan 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
362accf27a5SSukumar Swaminathan 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
363accf27a5SSukumar Swaminathan 	}
364bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
365bafec742SSukumar Swaminathan }
366bafec742SSukumar Swaminathan 
367bafec742SSukumar Swaminathan /*
368bafec742SSukumar Swaminathan  * The CFG register is used to download TX and RX control blocks
369bafec742SSukumar Swaminathan  * to the chip. This function waits for an operation to complete.
370bafec742SSukumar Swaminathan  */
371bafec742SSukumar Swaminathan static int
ql_wait_cfg(qlge_t * qlge,uint32_t bit)372bafec742SSukumar Swaminathan ql_wait_cfg(qlge_t *qlge, uint32_t bit)
373bafec742SSukumar Swaminathan {
374accf27a5SSukumar Swaminathan 	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
375bafec742SSukumar Swaminathan }
376bafec742SSukumar Swaminathan 
377bafec742SSukumar Swaminathan 
378bafec742SSukumar Swaminathan /*
379bafec742SSukumar Swaminathan  * Used to issue init control blocks to hw. Maps control block,
380bafec742SSukumar Swaminathan  * sets address, triggers download, waits for completion.
381bafec742SSukumar Swaminathan  */
382bafec742SSukumar Swaminathan static int
ql_write_cfg(qlge_t * qlge,uint32_t bit,uint64_t phy_addr,uint16_t q_id)383bafec742SSukumar Swaminathan ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
384bafec742SSukumar Swaminathan {
385bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
386bafec742SSukumar Swaminathan 	uint32_t mask;
387bafec742SSukumar Swaminathan 	uint32_t value;
388bafec742SSukumar Swaminathan 
389bafec742SSukumar Swaminathan 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
390bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
391bafec742SSukumar Swaminathan 		goto exit;
392bafec742SSukumar Swaminathan 	}
393bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
394bafec742SSukumar Swaminathan 	if (status != DDI_SUCCESS) {
395bafec742SSukumar Swaminathan 		goto exit;
396bafec742SSukumar Swaminathan 	}
397bafec742SSukumar Swaminathan 
398bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
399bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
400bafec742SSukumar Swaminathan 
401bafec742SSukumar Swaminathan 	mask = CFG_Q_MASK | (bit << 16);
402bafec742SSukumar Swaminathan 	value = bit | (q_id << CFG_Q_SHIFT);
403bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
404bafec742SSukumar Swaminathan 
405bafec742SSukumar Swaminathan 	/*
406bafec742SSukumar Swaminathan 	 * Wait for the bit to clear after signaling hw.
407bafec742SSukumar Swaminathan 	 */
408bafec742SSukumar Swaminathan 	status = ql_wait_cfg(qlge, bit);
409bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
410bafec742SSukumar Swaminathan 
411bafec742SSukumar Swaminathan exit:
412bafec742SSukumar Swaminathan 	return (status);
413bafec742SSukumar Swaminathan }
414bafec742SSukumar Swaminathan 
415bafec742SSukumar Swaminathan /*
416bafec742SSukumar Swaminathan  * Initialize adapter instance
417bafec742SSukumar Swaminathan  */
418bafec742SSukumar Swaminathan static int
ql_init_instance(qlge_t * qlge)419bafec742SSukumar Swaminathan ql_init_instance(qlge_t *qlge)
420bafec742SSukumar Swaminathan {
421bafec742SSukumar Swaminathan 	int i;
422bafec742SSukumar Swaminathan 
423bafec742SSukumar Swaminathan 	/* Default value */
424bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_INIT;
425bafec742SSukumar Swaminathan 	qlge->mtu = ETHERMTU;		/* set normal size as default */
426bafec742SSukumar Swaminathan 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
427bafec742SSukumar Swaminathan 
428bafec742SSukumar Swaminathan 	for (i = 0; i < MAX_RX_RINGS; i++) {
429bafec742SSukumar Swaminathan 		qlge->rx_polls[i] = 0;
430bafec742SSukumar Swaminathan 		qlge->rx_interrupts[i] = 0;
431bafec742SSukumar Swaminathan 	}
432bafec742SSukumar Swaminathan 
433bafec742SSukumar Swaminathan 	/*
434bafec742SSukumar Swaminathan 	 * Set up the operating parameters.
435bafec742SSukumar Swaminathan 	 */
436bafec742SSukumar Swaminathan 	qlge->multicast_list_count = 0;
437bafec742SSukumar Swaminathan 
438bafec742SSukumar Swaminathan 	/*
439bafec742SSukumar Swaminathan 	 * Set up the max number of unicast list
440bafec742SSukumar Swaminathan 	 */
441bafec742SSukumar Swaminathan 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
442bafec742SSukumar Swaminathan 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
443bafec742SSukumar Swaminathan 
444bafec742SSukumar Swaminathan 	/*
445bafec742SSukumar Swaminathan 	 * read user defined properties in .conf file
446bafec742SSukumar Swaminathan 	 */
447bafec742SSukumar Swaminathan 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
448accf27a5SSukumar Swaminathan 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
449bafec742SSukumar Swaminathan 
450bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
451bafec742SSukumar Swaminathan 
452bafec742SSukumar Swaminathan 	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
453bafec742SSukumar Swaminathan 	ql_pci_config(qlge);
454bafec742SSukumar Swaminathan 	qlge->ip_hdr_offset = 0;
455bafec742SSukumar Swaminathan 
456bafec742SSukumar Swaminathan 	if (qlge->device_id == 0x8000) {
457bafec742SSukumar Swaminathan 		/* Schultz card */
458bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_CHIP_8100;
459bafec742SSukumar Swaminathan 		/* enable just ipv4 chksum offload for Schultz */
460bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
461bafec742SSukumar Swaminathan 		/*
462bafec742SSukumar Swaminathan 		 * Schultz firmware does not do pseduo IP header checksum
463bafec742SSukumar Swaminathan 		 * calculation, needed to be done by driver
464bafec742SSukumar Swaminathan 		 */
465bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
466bafec742SSukumar Swaminathan 		if (qlge->lso_enable)
467bafec742SSukumar Swaminathan 			qlge->cfg_flags |= CFG_LSO;
468bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
469bafec742SSukumar Swaminathan 		/* Schultz must split packet header */
470bafec742SSukumar Swaminathan 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
471bafec742SSukumar Swaminathan 		qlge->max_read_mbx = 5;
472bafec742SSukumar Swaminathan 		qlge->ip_hdr_offset = 2;
473bafec742SSukumar Swaminathan 	}
474bafec742SSukumar Swaminathan 
475bafec742SSukumar Swaminathan 	/* Set Function Number and some of the iocb mac information */
476bafec742SSukumar Swaminathan 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
477bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
478bafec742SSukumar Swaminathan 
479bafec742SSukumar Swaminathan 	/* Read network settings from NVRAM */
480bafec742SSukumar Swaminathan 	/* After nvram is read successfully, update dev_addr */
481bafec742SSukumar Swaminathan 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
482bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
483bafec742SSukumar Swaminathan 		for (i = 0; i < ETHERADDRL; i++) {
484bafec742SSukumar Swaminathan 			qlge->dev_addr.ether_addr_octet[i] =
485bafec742SSukumar Swaminathan 			    qlge->nic_config.factory_MAC[i];
486bafec742SSukumar Swaminathan 		}
487bafec742SSukumar Swaminathan 	} else {
488bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
489bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
490bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
491bafec742SSukumar Swaminathan 	}
492bafec742SSukumar Swaminathan 
493bafec742SSukumar Swaminathan 	bcopy(qlge->dev_addr.ether_addr_octet,
494bafec742SSukumar Swaminathan 	    qlge->unicst_addr[0].addr.ether_addr_octet,
495bafec742SSukumar Swaminathan 	    ETHERADDRL);
496bafec742SSukumar Swaminathan 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
497bafec742SSukumar Swaminathan 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
498bafec742SSukumar Swaminathan 
499bafec742SSukumar Swaminathan 	qlge->port_link_state = LS_DOWN;
500bafec742SSukumar Swaminathan 
501bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
502bafec742SSukumar Swaminathan }
503bafec742SSukumar Swaminathan 
504bafec742SSukumar Swaminathan 
505bafec742SSukumar Swaminathan /*
506bafec742SSukumar Swaminathan  * This hardware semaphore provides the mechanism for exclusive access to
507bafec742SSukumar Swaminathan  * resources shared between the NIC driver, MPI firmware,
508bafec742SSukumar Swaminathan  * FCOE firmware and the FC driver.
509bafec742SSukumar Swaminathan  */
510bafec742SSukumar Swaminathan static int
ql_sem_trylock(qlge_t * qlge,uint32_t sem_mask)511bafec742SSukumar Swaminathan ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
512bafec742SSukumar Swaminathan {
513bafec742SSukumar Swaminathan 	uint32_t sem_bits = 0;
514bafec742SSukumar Swaminathan 
515bafec742SSukumar Swaminathan 	switch (sem_mask) {
516bafec742SSukumar Swaminathan 	case SEM_XGMAC0_MASK:
517bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
518bafec742SSukumar Swaminathan 		break;
519bafec742SSukumar Swaminathan 	case SEM_XGMAC1_MASK:
520bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
521bafec742SSukumar Swaminathan 		break;
522bafec742SSukumar Swaminathan 	case SEM_ICB_MASK:
523bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
524bafec742SSukumar Swaminathan 		break;
525bafec742SSukumar Swaminathan 	case SEM_MAC_ADDR_MASK:
526bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
527bafec742SSukumar Swaminathan 		break;
528bafec742SSukumar Swaminathan 	case SEM_FLASH_MASK:
529bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
530bafec742SSukumar Swaminathan 		break;
531bafec742SSukumar Swaminathan 	case SEM_PROBE_MASK:
532bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
533bafec742SSukumar Swaminathan 		break;
534bafec742SSukumar Swaminathan 	case SEM_RT_IDX_MASK:
535bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
536bafec742SSukumar Swaminathan 		break;
537bafec742SSukumar Swaminathan 	case SEM_PROC_REG_MASK:
538bafec742SSukumar Swaminathan 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
539bafec742SSukumar Swaminathan 		break;
540bafec742SSukumar Swaminathan 	default:
541bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Bad Semaphore mask!.");
542bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
543bafec742SSukumar Swaminathan 	}
544bafec742SSukumar Swaminathan 
545bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
546bafec742SSukumar Swaminathan 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
547bafec742SSukumar Swaminathan }
548bafec742SSukumar Swaminathan 
549bafec742SSukumar Swaminathan /*
550bafec742SSukumar Swaminathan  * Lock a specific bit of Semaphore register to gain
551bafec742SSukumar Swaminathan  * access to a particular shared register
552bafec742SSukumar Swaminathan  */
553bafec742SSukumar Swaminathan int
ql_sem_spinlock(qlge_t * qlge,uint32_t sem_mask)554bafec742SSukumar Swaminathan ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
555bafec742SSukumar Swaminathan {
556bafec742SSukumar Swaminathan 	unsigned int wait_count = 30;
557bafec742SSukumar Swaminathan 
558bafec742SSukumar Swaminathan 	while (wait_count) {
559bafec742SSukumar Swaminathan 		if (!ql_sem_trylock(qlge, sem_mask))
560bafec742SSukumar Swaminathan 			return (DDI_SUCCESS);
561bafec742SSukumar Swaminathan 		qlge_delay(100);
562bafec742SSukumar Swaminathan 		wait_count--;
563bafec742SSukumar Swaminathan 	}
564bafec742SSukumar Swaminathan 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
565bafec742SSukumar Swaminathan 	    __func__, qlge->instance, sem_mask);
566bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
567bafec742SSukumar Swaminathan }
568bafec742SSukumar Swaminathan 
569bafec742SSukumar Swaminathan /*
 * Unlock a specific bit of Semaphore register to release
571bafec742SSukumar Swaminathan  * access to a particular shared register
572bafec742SSukumar Swaminathan  */
void
ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
{
	/*
	 * NOTE(review): writing the mask without its SEM_SET bits (cf.
	 * ql_sem_trylock, which writes sem_bits | sem_mask) appears to
	 * release ownership — confirm against the chip documentation.
	 */
	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
}
579bafec742SSukumar Swaminathan 
580bafec742SSukumar Swaminathan /*
581bafec742SSukumar Swaminathan  * Get property value from configuration file.
582bafec742SSukumar Swaminathan  *
583bafec742SSukumar Swaminathan  * string = property string pointer.
584bafec742SSukumar Swaminathan  *
585bafec742SSukumar Swaminathan  * Returns:
586bafec742SSukumar Swaminathan  * 0xFFFFFFFF = no property else property value.
587bafec742SSukumar Swaminathan  */
588bafec742SSukumar Swaminathan static uint32_t
ql_get_prop(qlge_t * qlge,char * string)589bafec742SSukumar Swaminathan ql_get_prop(qlge_t *qlge, char *string)
590bafec742SSukumar Swaminathan {
591bafec742SSukumar Swaminathan 	char buf[256];
592bafec742SSukumar Swaminathan 	uint32_t data;
593bafec742SSukumar Swaminathan 
594bafec742SSukumar Swaminathan 	/* Get adapter instance parameter. */
595bafec742SSukumar Swaminathan 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
596bafec742SSukumar Swaminathan 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
597bafec742SSukumar Swaminathan 	    (int)0xffffffff);
598bafec742SSukumar Swaminathan 
599bafec742SSukumar Swaminathan 	/* Adapter instance parameter found? */
600bafec742SSukumar Swaminathan 	if (data == 0xffffffff) {
601bafec742SSukumar Swaminathan 		/* No, get default parameter. */
602bafec742SSukumar Swaminathan 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
603bafec742SSukumar Swaminathan 		    string, (int)0xffffffff);
604bafec742SSukumar Swaminathan 	}
605bafec742SSukumar Swaminathan 
606bafec742SSukumar Swaminathan 	return (data);
607bafec742SSukumar Swaminathan }
608bafec742SSukumar Swaminathan 
609bafec742SSukumar Swaminathan /*
610bafec742SSukumar Swaminathan  * Read user setting from configuration file.
611bafec742SSukumar Swaminathan  */
612bafec742SSukumar Swaminathan static void
ql_read_conf(qlge_t * qlge)613bafec742SSukumar Swaminathan ql_read_conf(qlge_t *qlge)
614bafec742SSukumar Swaminathan {
615bafec742SSukumar Swaminathan 	uint32_t data;
616bafec742SSukumar Swaminathan 
617bafec742SSukumar Swaminathan 	/* clear configuration flags */
618bafec742SSukumar Swaminathan 	qlge->cfg_flags = 0;
619bafec742SSukumar Swaminathan 
620accf27a5SSukumar Swaminathan 	/* Set up the default ring sizes. */
621accf27a5SSukumar Swaminathan 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
622accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ring_size");
623accf27a5SSukumar Swaminathan 	/* if data is valid */
624accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
625accf27a5SSukumar Swaminathan 		if (qlge->tx_ring_size != data) {
626accf27a5SSukumar Swaminathan 			qlge->tx_ring_size = (uint16_t)data;
627accf27a5SSukumar Swaminathan 		}
628accf27a5SSukumar Swaminathan 	}
629accf27a5SSukumar Swaminathan 
630accf27a5SSukumar Swaminathan 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
631accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_ring_size");
632accf27a5SSukumar Swaminathan 	/* if data is valid */
633accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
634accf27a5SSukumar Swaminathan 		if (qlge->rx_ring_size != data) {
635accf27a5SSukumar Swaminathan 			qlge->rx_ring_size = (uint16_t)data;
636accf27a5SSukumar Swaminathan 		}
637accf27a5SSukumar Swaminathan 	}
638accf27a5SSukumar Swaminathan 
639accf27a5SSukumar Swaminathan 	qlge->tx_ring_count = 8;
640accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ring_count");
641accf27a5SSukumar Swaminathan 	/* if data is valid */
642accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
643accf27a5SSukumar Swaminathan 		if (qlge->tx_ring_count != data) {
644accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = (uint16_t)data;
645accf27a5SSukumar Swaminathan 		}
646accf27a5SSukumar Swaminathan 	}
647accf27a5SSukumar Swaminathan 
648accf27a5SSukumar Swaminathan 	qlge->rss_ring_count = 8;
649accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rss_ring_count");
650accf27a5SSukumar Swaminathan 	/* if data is valid */
651accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
652accf27a5SSukumar Swaminathan 		if (qlge->rss_ring_count != data) {
653accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = (uint16_t)data;
654accf27a5SSukumar Swaminathan 		}
655accf27a5SSukumar Swaminathan 	}
656accf27a5SSukumar Swaminathan 
657bafec742SSukumar Swaminathan 	/* Get default rx_copy enable/disable. */
658bafec742SSukumar Swaminathan 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
659bafec742SSukumar Swaminathan 	    data == 0) {
660bafec742SSukumar Swaminathan 		qlge->rx_copy = B_FALSE;
661bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
662bafec742SSukumar Swaminathan 	} else if (data == 1) {
663bafec742SSukumar Swaminathan 		qlge->rx_copy = B_TRUE;
664bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
665bafec742SSukumar Swaminathan 	}
666bafec742SSukumar Swaminathan 
667a6766df4SSukumar Swaminathan 	qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
668a6766df4SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_copy_threshold");
669a6766df4SSukumar Swaminathan 	if ((data != 0xffffffff) && (data != 0)) {
670a6766df4SSukumar Swaminathan 		qlge->rx_copy_threshold = data;
671a6766df4SSukumar Swaminathan 		cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
672a6766df4SSukumar Swaminathan 		    qlge->rx_copy_threshold);
673a6766df4SSukumar Swaminathan 	}
674a6766df4SSukumar Swaminathan 
675bafec742SSukumar Swaminathan 	/* Get mtu packet size. */
676bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "mtu");
677bafec742SSukumar Swaminathan 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
678bafec742SSukumar Swaminathan 		if (qlge->mtu != data) {
679bafec742SSukumar Swaminathan 			qlge->mtu = data;
680bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
681bafec742SSukumar Swaminathan 		}
682bafec742SSukumar Swaminathan 	}
683bafec742SSukumar Swaminathan 
684accf27a5SSukumar Swaminathan 	if (qlge->mtu == JUMBO_MTU) {
685accf27a5SSukumar Swaminathan 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
686accf27a5SSukumar Swaminathan 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
687accf27a5SSukumar Swaminathan 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
688accf27a5SSukumar Swaminathan 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
689accf27a5SSukumar Swaminathan 	}
690accf27a5SSukumar Swaminathan 
691accf27a5SSukumar Swaminathan 
692bafec742SSukumar Swaminathan 	/* Get pause mode, default is Per Priority mode. */
693bafec742SSukumar Swaminathan 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
694bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "pause");
695bafec742SSukumar Swaminathan 	if (data <= PAUSE_MODE_PER_PRIORITY) {
696bafec742SSukumar Swaminathan 		if (qlge->pause != data) {
697bafec742SSukumar Swaminathan 			qlge->pause = data;
698bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
699bafec742SSukumar Swaminathan 		}
700bafec742SSukumar Swaminathan 	}
701accf27a5SSukumar Swaminathan 	/* Receive interrupt delay */
702accf27a5SSukumar Swaminathan 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
703accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_intr_delay");
704accf27a5SSukumar Swaminathan 	/* if data is valid */
705accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
706accf27a5SSukumar Swaminathan 		if (qlge->rx_coalesce_usecs != data) {
707accf27a5SSukumar Swaminathan 			qlge->rx_coalesce_usecs = (uint16_t)data;
708accf27a5SSukumar Swaminathan 		}
709accf27a5SSukumar Swaminathan 	}
710accf27a5SSukumar Swaminathan 	/* Rx inter-packet delay. */
711accf27a5SSukumar Swaminathan 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
712accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "rx_ipkt_delay");
713accf27a5SSukumar Swaminathan 	/* if data is valid */
714accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
715accf27a5SSukumar Swaminathan 		if (qlge->rx_max_coalesced_frames != data) {
716accf27a5SSukumar Swaminathan 			qlge->rx_max_coalesced_frames = (uint16_t)data;
717accf27a5SSukumar Swaminathan 		}
718accf27a5SSukumar Swaminathan 	}
719accf27a5SSukumar Swaminathan 	/* Transmit interrupt delay */
720accf27a5SSukumar Swaminathan 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
721accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_intr_delay");
722accf27a5SSukumar Swaminathan 	/* if data is valid */
723accf27a5SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
724accf27a5SSukumar Swaminathan 		if (qlge->tx_coalesce_usecs != data) {
725accf27a5SSukumar Swaminathan 			qlge->tx_coalesce_usecs = (uint16_t)data;
726accf27a5SSukumar Swaminathan 		}
727accf27a5SSukumar Swaminathan 	}
728accf27a5SSukumar Swaminathan 	/* Tx inter-packet delay. */
729accf27a5SSukumar Swaminathan 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
730accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "tx_ipkt_delay");
731bafec742SSukumar Swaminathan 	/* if data is valid */
732bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && data) {
733bafec742SSukumar Swaminathan 		if (qlge->tx_max_coalesced_frames != data) {
734bafec742SSukumar Swaminathan 			qlge->tx_max_coalesced_frames = (uint16_t)data;
735bafec742SSukumar Swaminathan 		}
736bafec742SSukumar Swaminathan 	}
737bafec742SSukumar Swaminathan 
738bafec742SSukumar Swaminathan 	/* Get split header payload_copy_thresh. */
739accf27a5SSukumar Swaminathan 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
740bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "payload_copy_thresh");
741bafec742SSukumar Swaminathan 	/* if data is valid */
742bafec742SSukumar Swaminathan 	if ((data != 0xffffffff) && (data != 0)) {
743bafec742SSukumar Swaminathan 		if (qlge->payload_copy_thresh != data) {
744bafec742SSukumar Swaminathan 			qlge->payload_copy_thresh = data;
745bafec742SSukumar Swaminathan 		}
746bafec742SSukumar Swaminathan 	}
747bafec742SSukumar Swaminathan 
748bafec742SSukumar Swaminathan 	/* large send offload (LSO) capability. */
749bafec742SSukumar Swaminathan 	qlge->lso_enable = 1;
750bafec742SSukumar Swaminathan 	data = ql_get_prop(qlge, "lso_enable");
751bafec742SSukumar Swaminathan 	/* if data is valid */
752accf27a5SSukumar Swaminathan 	if ((data == 0) || (data == 1)) {
753bafec742SSukumar Swaminathan 		if (qlge->lso_enable != data) {
754bafec742SSukumar Swaminathan 			qlge->lso_enable = (uint16_t)data;
755bafec742SSukumar Swaminathan 		}
756bafec742SSukumar Swaminathan 	}
757accf27a5SSukumar Swaminathan 
758accf27a5SSukumar Swaminathan 	/* dcbx capability. */
759accf27a5SSukumar Swaminathan 	qlge->dcbx_enable = 1;
760accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "dcbx_enable");
761accf27a5SSukumar Swaminathan 	/* if data is valid */
762accf27a5SSukumar Swaminathan 	if ((data == 0) || (data == 1)) {
763accf27a5SSukumar Swaminathan 		if (qlge->dcbx_enable != data) {
764accf27a5SSukumar Swaminathan 			qlge->dcbx_enable = (uint16_t)data;
765accf27a5SSukumar Swaminathan 		}
766accf27a5SSukumar Swaminathan 	}
767accf27a5SSukumar Swaminathan 	/* fault management enable */
768accf27a5SSukumar Swaminathan 	qlge->fm_enable = B_TRUE;
769accf27a5SSukumar Swaminathan 	data = ql_get_prop(qlge, "fm-enable");
770accf27a5SSukumar Swaminathan 	if ((data == 0x1) || (data == 0)) {
771accf27a5SSukumar Swaminathan 		qlge->fm_enable = (boolean_t)data;
772accf27a5SSukumar Swaminathan 	}
773accf27a5SSukumar Swaminathan 
774bafec742SSukumar Swaminathan }
775bafec742SSukumar Swaminathan 
776bafec742SSukumar Swaminathan /*
777bafec742SSukumar Swaminathan  * Enable global interrupt
778bafec742SSukumar Swaminathan  */
779bafec742SSukumar Swaminathan static void
ql_enable_global_interrupt(qlge_t * qlge)780bafec742SSukumar Swaminathan ql_enable_global_interrupt(qlge_t *qlge)
781bafec742SSukumar Swaminathan {
782bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
783bafec742SSukumar Swaminathan 	    (INTR_EN_EI << 16) | INTR_EN_EI);
784bafec742SSukumar Swaminathan 	qlge->flags |= INTERRUPTS_ENABLED;
785bafec742SSukumar Swaminathan }
786bafec742SSukumar Swaminathan 
787bafec742SSukumar Swaminathan /*
788bafec742SSukumar Swaminathan  * Disable global interrupt
789bafec742SSukumar Swaminathan  */
790bafec742SSukumar Swaminathan static void
ql_disable_global_interrupt(qlge_t * qlge)791bafec742SSukumar Swaminathan ql_disable_global_interrupt(qlge_t *qlge)
792bafec742SSukumar Swaminathan {
793bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
794bafec742SSukumar Swaminathan 	qlge->flags &= ~INTERRUPTS_ENABLED;
795bafec742SSukumar Swaminathan }
796bafec742SSukumar Swaminathan 
797bafec742SSukumar Swaminathan /*
798bafec742SSukumar Swaminathan  * Enable one ring interrupt
799bafec742SSukumar Swaminathan  */
800bafec742SSukumar Swaminathan void
ql_enable_completion_interrupt(qlge_t * qlge,uint32_t intr)801bafec742SSukumar Swaminathan ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
802bafec742SSukumar Swaminathan {
803bafec742SSukumar Swaminathan 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
804bafec742SSukumar Swaminathan 
805bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
806bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr, ctx->irq_cnt));
807bafec742SSukumar Swaminathan 
808bafec742SSukumar Swaminathan 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
809bafec742SSukumar Swaminathan 		/*
810bafec742SSukumar Swaminathan 		 * Always enable if we're MSIX multi interrupts and
811bafec742SSukumar Swaminathan 		 * it's not the default (zeroeth) interrupt.
812bafec742SSukumar Swaminathan 		 */
813bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
814bafec742SSukumar Swaminathan 		return;
815bafec742SSukumar Swaminathan 	}
816bafec742SSukumar Swaminathan 
817bafec742SSukumar Swaminathan 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
818bafec742SSukumar Swaminathan 		mutex_enter(&qlge->hw_mutex);
819bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
820bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
821bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INTR,
822bafec742SSukumar Swaminathan 		    ("%s(%d): write %x to intr enable register \n",
823bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ctx->intr_en_mask));
824bafec742SSukumar Swaminathan 	}
825bafec742SSukumar Swaminathan }
826bafec742SSukumar Swaminathan 
827bafec742SSukumar Swaminathan /*
828bafec742SSukumar Swaminathan  * ql_forced_disable_completion_interrupt
829bafec742SSukumar Swaminathan  * Used by call from OS, may be called without
830bafec742SSukumar Swaminathan  * a pending interrupt so force the disable
831bafec742SSukumar Swaminathan  */
832bafec742SSukumar Swaminathan uint32_t
ql_forced_disable_completion_interrupt(qlge_t * qlge,uint32_t intr)833bafec742SSukumar Swaminathan ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
834bafec742SSukumar Swaminathan {
835bafec742SSukumar Swaminathan 	uint32_t var = 0;
836bafec742SSukumar Swaminathan 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
837bafec742SSukumar Swaminathan 
838bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
839bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr, ctx->irq_cnt));
840bafec742SSukumar Swaminathan 
841bafec742SSukumar Swaminathan 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
842bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
843bafec742SSukumar Swaminathan 		var = ql_read_reg(qlge, REG_STATUS);
844bafec742SSukumar Swaminathan 		return (var);
845bafec742SSukumar Swaminathan 	}
846bafec742SSukumar Swaminathan 
847bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
848bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
849bafec742SSukumar Swaminathan 	var = ql_read_reg(qlge, REG_STATUS);
850bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
851bafec742SSukumar Swaminathan 
852bafec742SSukumar Swaminathan 	return (var);
853bafec742SSukumar Swaminathan }
854bafec742SSukumar Swaminathan 
855bafec742SSukumar Swaminathan /*
856bafec742SSukumar Swaminathan  * Disable a completion interrupt
857bafec742SSukumar Swaminathan  */
858bafec742SSukumar Swaminathan void
ql_disable_completion_interrupt(qlge_t * qlge,uint32_t intr)859bafec742SSukumar Swaminathan ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
860bafec742SSukumar Swaminathan {
861bafec742SSukumar Swaminathan 	struct intr_ctx *ctx;
862bafec742SSukumar Swaminathan 
863bafec742SSukumar Swaminathan 	ctx = qlge->intr_ctx + intr;
864bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
865bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr, ctx->irq_cnt));
866bafec742SSukumar Swaminathan 	/*
867bafec742SSukumar Swaminathan 	 * HW disables for us if we're MSIX multi interrupts and
868bafec742SSukumar Swaminathan 	 * it's not the default (zeroeth) interrupt.
869bafec742SSukumar Swaminathan 	 */
870bafec742SSukumar Swaminathan 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
871bafec742SSukumar Swaminathan 		return;
872bafec742SSukumar Swaminathan 
873bafec742SSukumar Swaminathan 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
874bafec742SSukumar Swaminathan 		mutex_enter(&qlge->hw_mutex);
875bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
876bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
877bafec742SSukumar Swaminathan 	}
878bafec742SSukumar Swaminathan 	atomic_inc_32(&ctx->irq_cnt);
879bafec742SSukumar Swaminathan }
880bafec742SSukumar Swaminathan 
881bafec742SSukumar Swaminathan /*
882bafec742SSukumar Swaminathan  * Enable all completion interrupts
883bafec742SSukumar Swaminathan  */
884bafec742SSukumar Swaminathan static void
ql_enable_all_completion_interrupts(qlge_t * qlge)885bafec742SSukumar Swaminathan ql_enable_all_completion_interrupts(qlge_t *qlge)
886bafec742SSukumar Swaminathan {
887bafec742SSukumar Swaminathan 	int i;
888bafec742SSukumar Swaminathan 	uint32_t value = 1;
889bafec742SSukumar Swaminathan 
890bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
891bafec742SSukumar Swaminathan 		/*
892bafec742SSukumar Swaminathan 		 * Set the count to 1 for Legacy / MSI interrupts or for the
893bafec742SSukumar Swaminathan 		 * default interrupt (0)
894bafec742SSukumar Swaminathan 		 */
895bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
896bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
897bafec742SSukumar Swaminathan 		}
898bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(qlge, i);
899bafec742SSukumar Swaminathan 	}
900bafec742SSukumar Swaminathan }
901bafec742SSukumar Swaminathan 
902bafec742SSukumar Swaminathan /*
903bafec742SSukumar Swaminathan  * Disable all completion interrupts
904bafec742SSukumar Swaminathan  */
905bafec742SSukumar Swaminathan static void
ql_disable_all_completion_interrupts(qlge_t * qlge)906bafec742SSukumar Swaminathan ql_disable_all_completion_interrupts(qlge_t *qlge)
907bafec742SSukumar Swaminathan {
908bafec742SSukumar Swaminathan 	int i;
909bafec742SSukumar Swaminathan 	uint32_t value = 0;
910bafec742SSukumar Swaminathan 
911bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
912bafec742SSukumar Swaminathan 
913bafec742SSukumar Swaminathan 		/*
914bafec742SSukumar Swaminathan 		 * Set the count to 0 for Legacy / MSI interrupts or for the
915bafec742SSukumar Swaminathan 		 * default interrupt (0)
916bafec742SSukumar Swaminathan 		 */
917bafec742SSukumar Swaminathan 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
918bafec742SSukumar Swaminathan 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
919bafec742SSukumar Swaminathan 
920bafec742SSukumar Swaminathan 		ql_disable_completion_interrupt(qlge, i);
921bafec742SSukumar Swaminathan 	}
922bafec742SSukumar Swaminathan }
923bafec742SSukumar Swaminathan 
924bafec742SSukumar Swaminathan /*
925bafec742SSukumar Swaminathan  * Update small buffer queue producer index
926bafec742SSukumar Swaminathan  */
static void
ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
	    rx_ring->sbq_prod_idx));
	/* Doorbell write publishes the new small-buffer producer index. */
	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
	    rx_ring->sbq_prod_idx);
}
936bafec742SSukumar Swaminathan 
937bafec742SSukumar Swaminathan /*
938bafec742SSukumar Swaminathan  * Update large buffer queue producer index
939bafec742SSukumar Swaminathan  */
static void
ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
{
	/* Update the buffer producer index */
	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
	    rx_ring->lbq_prod_idx));
	/* Doorbell write publishes the new large-buffer producer index. */
	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
	    rx_ring->lbq_prod_idx);
}
949bafec742SSukumar Swaminathan 
950bafec742SSukumar Swaminathan /*
951bafec742SSukumar Swaminathan  * Adds a small buffer descriptor to end of its in use list,
952bafec742SSukumar Swaminathan  * assumes sbq_lock is already taken
953bafec742SSukumar Swaminathan  */
954bafec742SSukumar Swaminathan static void
ql_add_sbuf_to_in_use_list(struct rx_ring * rx_ring,struct bq_desc * sbq_desc)955bafec742SSukumar Swaminathan ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
956bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
957bafec742SSukumar Swaminathan {
958bafec742SSukumar Swaminathan 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
959bafec742SSukumar Swaminathan 
960bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
961bafec742SSukumar Swaminathan 	inuse_idx++;
962bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->sbq_len)
963bafec742SSukumar Swaminathan 		inuse_idx = 0;
964bafec742SSukumar Swaminathan 	rx_ring->sbq_use_tail = inuse_idx;
965bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
966bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
967bafec742SSukumar Swaminathan }
968bafec742SSukumar Swaminathan 
969bafec742SSukumar Swaminathan /*
970bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its in use list
971bafec742SSukumar Swaminathan  */
972bafec742SSukumar Swaminathan static struct bq_desc *
ql_get_sbuf_from_in_use_list(struct rx_ring * rx_ring)973bafec742SSukumar Swaminathan ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
974bafec742SSukumar Swaminathan {
975bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = NULL;
976bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
977bafec742SSukumar Swaminathan 
978bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
979bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->sbq_use_head;
980bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
981bafec742SSukumar Swaminathan 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
982bafec742SSukumar Swaminathan 
983bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
984bafec742SSukumar Swaminathan 		inuse_idx++;
985bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->sbq_len)
986bafec742SSukumar Swaminathan 			inuse_idx = 0;
987bafec742SSukumar Swaminathan 		rx_ring->sbq_use_head = inuse_idx;
988bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
989bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
990bafec742SSukumar Swaminathan 		sbq_desc->upl_inuse = 1;
991bafec742SSukumar Swaminathan 		/* if mp is NULL */
992bafec742SSukumar Swaminathan 		if (sbq_desc->mp == NULL) {
993bafec742SSukumar Swaminathan 			/* try to remap mp again */
994bafec742SSukumar Swaminathan 			sbq_desc->mp =
995bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
996bafec742SSukumar Swaminathan 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
997bafec742SSukumar Swaminathan 		}
998bafec742SSukumar Swaminathan 	}
999bafec742SSukumar Swaminathan 
1000bafec742SSukumar Swaminathan 	return (sbq_desc);
1001bafec742SSukumar Swaminathan }
1002bafec742SSukumar Swaminathan 
1003bafec742SSukumar Swaminathan /*
1004bafec742SSukumar Swaminathan  * Add a small buffer descriptor to its free list
1005bafec742SSukumar Swaminathan  */
1006bafec742SSukumar Swaminathan static void
ql_add_sbuf_to_free_list(struct rx_ring * rx_ring,struct bq_desc * sbq_desc)1007bafec742SSukumar Swaminathan ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1008bafec742SSukumar Swaminathan     struct bq_desc *sbq_desc)
1009bafec742SSukumar Swaminathan {
1010bafec742SSukumar Swaminathan 	uint32_t free_idx;
1011bafec742SSukumar Swaminathan 
1012bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1013bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_tail;
1014bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1015bafec742SSukumar Swaminathan 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1016bafec742SSukumar Swaminathan 	free_idx++;
1017bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->sbq_len)
1018bafec742SSukumar Swaminathan 		free_idx = 0;
1019bafec742SSukumar Swaminathan 	rx_ring->sbq_free_tail = free_idx;
1020bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->sbuf_free_count);
1021bafec742SSukumar Swaminathan }
1022bafec742SSukumar Swaminathan 
1023bafec742SSukumar Swaminathan /*
1024bafec742SSukumar Swaminathan  * Get a small buffer descriptor from its free list
1025bafec742SSukumar Swaminathan  */
1026bafec742SSukumar Swaminathan static struct bq_desc *
ql_get_sbuf_from_free_list(struct rx_ring * rx_ring)1027bafec742SSukumar Swaminathan ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1028bafec742SSukumar Swaminathan {
1029bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1030bafec742SSukumar Swaminathan 	uint32_t free_idx;
1031bafec742SSukumar Swaminathan 
1032bafec742SSukumar Swaminathan 	free_idx = rx_ring->sbq_free_head;
1033bafec742SSukumar Swaminathan 	/* Pick from top of free list */
1034bafec742SSukumar Swaminathan 	sbq_desc = rx_ring->sbuf_free[free_idx];
1035bafec742SSukumar Swaminathan 	rx_ring->sbuf_free[free_idx] = NULL;
1036bafec742SSukumar Swaminathan 	if (sbq_desc != NULL) {
1037bafec742SSukumar Swaminathan 		free_idx++;
1038bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->sbq_len)
1039bafec742SSukumar Swaminathan 			free_idx = 0;
1040bafec742SSukumar Swaminathan 		rx_ring->sbq_free_head = free_idx;
1041bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->sbuf_free_count);
1042bafec742SSukumar Swaminathan 	}
1043bafec742SSukumar Swaminathan 	return (sbq_desc);
1044bafec742SSukumar Swaminathan }
1045bafec742SSukumar Swaminathan 
1046bafec742SSukumar Swaminathan /*
1047bafec742SSukumar Swaminathan  * Add a large buffer descriptor to its in use list
1048bafec742SSukumar Swaminathan  */
1049bafec742SSukumar Swaminathan static void
ql_add_lbuf_to_in_use_list(struct rx_ring * rx_ring,struct bq_desc * lbq_desc)1050bafec742SSukumar Swaminathan ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1051bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1052bafec742SSukumar Swaminathan {
1053bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
1054bafec742SSukumar Swaminathan 
1055bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_tail;
1056bafec742SSukumar Swaminathan 
1057bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1058bafec742SSukumar Swaminathan 	inuse_idx++;
1059bafec742SSukumar Swaminathan 	if (inuse_idx >= rx_ring->lbq_len)
1060bafec742SSukumar Swaminathan 		inuse_idx = 0;
1061bafec742SSukumar Swaminathan 	rx_ring->lbq_use_tail = inuse_idx;
1062bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1063bafec742SSukumar Swaminathan }
1064bafec742SSukumar Swaminathan 
1065bafec742SSukumar Swaminathan /*
1066bafec742SSukumar Swaminathan  * Get a large buffer descriptor from in use list
1067bafec742SSukumar Swaminathan  */
1068bafec742SSukumar Swaminathan static struct bq_desc *
ql_get_lbuf_from_in_use_list(struct rx_ring * rx_ring)1069bafec742SSukumar Swaminathan ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1070bafec742SSukumar Swaminathan {
1071bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1072bafec742SSukumar Swaminathan 	uint32_t inuse_idx;
1073bafec742SSukumar Swaminathan 
1074bafec742SSukumar Swaminathan 	/* Pick from head of in use list */
1075bafec742SSukumar Swaminathan 	inuse_idx = rx_ring->lbq_use_head;
1076bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1077bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1078bafec742SSukumar Swaminathan 
1079bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1080bafec742SSukumar Swaminathan 		inuse_idx++;
1081bafec742SSukumar Swaminathan 		if (inuse_idx >= rx_ring->lbq_len)
1082bafec742SSukumar Swaminathan 			inuse_idx = 0;
1083bafec742SSukumar Swaminathan 		rx_ring->lbq_use_head = inuse_idx;
1084bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1085bafec742SSukumar Swaminathan 		atomic_inc_32(&rx_ring->rx_indicate);
1086bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 1;
1087bafec742SSukumar Swaminathan 
1088bafec742SSukumar Swaminathan 		/* if mp is NULL */
1089bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1090bafec742SSukumar Swaminathan 			/* try to remap mp again */
1091bafec742SSukumar Swaminathan 			lbq_desc->mp =
1092bafec742SSukumar Swaminathan 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1093bafec742SSukumar Swaminathan 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1094bafec742SSukumar Swaminathan 		}
1095bafec742SSukumar Swaminathan 	}
1096bafec742SSukumar Swaminathan 	return (lbq_desc);
1097bafec742SSukumar Swaminathan }
1098bafec742SSukumar Swaminathan 
1099bafec742SSukumar Swaminathan /*
1100bafec742SSukumar Swaminathan  * Add a large buffer descriptor to free list
1101bafec742SSukumar Swaminathan  */
1102bafec742SSukumar Swaminathan static void
ql_add_lbuf_to_free_list(struct rx_ring * rx_ring,struct bq_desc * lbq_desc)1103bafec742SSukumar Swaminathan ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1104bafec742SSukumar Swaminathan     struct bq_desc *lbq_desc)
1105bafec742SSukumar Swaminathan {
1106bafec742SSukumar Swaminathan 	uint32_t free_idx;
1107bafec742SSukumar Swaminathan 
1108bafec742SSukumar Swaminathan 	/* Add to the end of free list */
1109bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_tail;
1110bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1111bafec742SSukumar Swaminathan 	free_idx++;
1112bafec742SSukumar Swaminathan 	if (free_idx >= rx_ring->lbq_len)
1113bafec742SSukumar Swaminathan 		free_idx = 0;
1114bafec742SSukumar Swaminathan 	rx_ring->lbq_free_tail = free_idx;
1115bafec742SSukumar Swaminathan 	atomic_inc_32(&rx_ring->lbuf_free_count);
1116bafec742SSukumar Swaminathan 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1117bafec742SSukumar Swaminathan }
1118bafec742SSukumar Swaminathan 
1119bafec742SSukumar Swaminathan /*
1120bafec742SSukumar Swaminathan  * Get a large buffer descriptor from its free list
1121bafec742SSukumar Swaminathan  */
1122bafec742SSukumar Swaminathan static struct bq_desc *
ql_get_lbuf_from_free_list(struct rx_ring * rx_ring)1123bafec742SSukumar Swaminathan ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1124bafec742SSukumar Swaminathan {
1125bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1126bafec742SSukumar Swaminathan 	uint32_t free_idx;
1127bafec742SSukumar Swaminathan 
1128bafec742SSukumar Swaminathan 	free_idx = rx_ring->lbq_free_head;
1129bafec742SSukumar Swaminathan 	/* Pick from head of free list */
1130bafec742SSukumar Swaminathan 	lbq_desc = rx_ring->lbuf_free[free_idx];
1131bafec742SSukumar Swaminathan 	rx_ring->lbuf_free[free_idx] = NULL;
1132bafec742SSukumar Swaminathan 
1133bafec742SSukumar Swaminathan 	if (lbq_desc != NULL) {
1134bafec742SSukumar Swaminathan 		free_idx++;
1135bafec742SSukumar Swaminathan 		if (free_idx >= rx_ring->lbq_len)
1136bafec742SSukumar Swaminathan 			free_idx = 0;
1137bafec742SSukumar Swaminathan 		rx_ring->lbq_free_head = free_idx;
1138bafec742SSukumar Swaminathan 		atomic_dec_32(&rx_ring->lbuf_free_count);
1139bafec742SSukumar Swaminathan 	}
1140bafec742SSukumar Swaminathan 	return (lbq_desc);
1141bafec742SSukumar Swaminathan }
1142bafec742SSukumar Swaminathan 
/*
 * Return a small buffer descriptor to its ring's free list and, when
 * enough free buffers have accumulated, re-arm the hardware small
 * buffer queue with them.  Runs in the rx recycle path once the stack
 * is done with a loaned-up receive buffer.
 *
 * sbq_desc     - descriptor being returned
 * alloc_memory - B_TRUE to re-create the descriptor's mblk with
 *                desballoc() because the previous one was consumed
 */
static void
ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = sbq_desc->rx_ring;
	uint64_t *sbq_entry;
	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
	/*
	 * Sync access
	 */
	mutex_enter(&rx_ring->sbq_lock);

	sbq_desc->upl_inuse = 0;

	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((sbq_desc->free_buf != 0) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		/*
		 * free_buf == 0 means the buffer was still counted as
		 * loaned to the stack; drop that count before leaving.
		 */
		if (sbq_desc->free_buf == 0)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->sbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "sbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	uint32_t sb_consumer_idx;
	uint32_t sb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	/* Doorbell holds producer (low 16 bits) and consumer (high 16). */
	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
	sb_producer_idx = temp & 0x0000ffff;
	sb_consumer_idx = (temp >> 16);

	if (sb_consumer_idx > sb_producer_idx)
		num_free_buffers = NUM_SMALL_BUFFERS -
		    (sb_consumer_idx - sb_producer_idx);
	else
		num_free_buffers = sb_producer_idx - sb_consumer_idx;

	/* Track the low-water mark of hardware-visible free buffers. */
	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;

#endif

#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
		    " sbq_desc index %d.",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    sbq_desc->index);
#endif
	if (alloc_memory) {
		/* Previous mblk was consumed by the stack; map a new one. */
		sbq_desc->mp =
		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		if (sbq_desc->mp == NULL) {
			/* Not fatal here; buffer still returns to free list */
			rx_ring->rx_failed_sbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);

	/* Rearm if possible */
	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		sbq_entry = rx_ring->sbq_dma.vaddr;
		sbq_entry += rx_ring->sbq_prod_idx;

		/* Move free buffers to the in-use list and publish them. */
		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);

			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
			sbq_entry++;
			rx_ring->sbq_prod_idx++;
			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
				rx_ring->sbq_prod_idx = 0;
				sbq_entry = rx_ring->sbq_dma.vaddr;
			}
			/* Add to end of in use list */
			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
		}

		/* Update small buffer queue producer index */
		ql_update_sbq_prod_idx(qlge, rx_ring);
	}

	mutex_exit(&rx_ring->sbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
	    __func__, qlge->instance, rx_ring->sbuf_free_count));
}
1244bafec742SSukumar Swaminathan 
1245bafec742SSukumar Swaminathan /*
1246bafec742SSukumar Swaminathan  * rx recycle call back function
1247bafec742SSukumar Swaminathan  */
1248bafec742SSukumar Swaminathan static void
ql_release_to_sbuf_free_list(caddr_t p)1249bafec742SSukumar Swaminathan ql_release_to_sbuf_free_list(caddr_t p)
1250bafec742SSukumar Swaminathan {
1251bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1252bafec742SSukumar Swaminathan 
1253bafec742SSukumar Swaminathan 	if (sbq_desc == NULL)
1254bafec742SSukumar Swaminathan 		return;
1255bafec742SSukumar Swaminathan 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1256bafec742SSukumar Swaminathan }
1257bafec742SSukumar Swaminathan 
/*
 * Return a large buffer descriptor to its ring's free list and, when
 * enough free buffers have accumulated, re-arm the hardware large
 * buffer queue with them.  Runs in the rx recycle path once the stack
 * is done with a loaned-up receive buffer.
 *
 * lbq_desc     - descriptor being returned
 * alloc_memory - B_TRUE to re-create the descriptor's mblk with
 *                desballoc() because the previous one was consumed
 */
static void
ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
{
	struct rx_ring *rx_ring = lbq_desc->rx_ring;
	uint64_t *lbq_entry;
	qlge_t *qlge = rx_ring->qlge;

	/* Sync access */
	mutex_enter(&rx_ring->lbq_lock);

	lbq_desc->upl_inuse = 0;
	/*
	 * If we are freeing the buffers as a result of adapter unload, get out
	 */
	if ((lbq_desc->free_buf != 0) ||
	    (qlge->mac_flags == QL_MAC_DETACH)) {
		/*
		 * free_buf == 0 means the buffer was still counted as
		 * loaned to the stack; drop that count before leaving.
		 */
		if (lbq_desc->free_buf == 0)
			atomic_dec_32(&rx_ring->rx_indicate);
		mutex_exit(&rx_ring->lbq_lock);
		return;
	}
#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate == 0)
		cmn_err(CE_WARN, "lbq: indicate wrong");
#endif
#ifdef QLGE_TRACK_BUFFER_USAGE
	uint32_t lb_consumer_idx;
	uint32_t lb_producer_idx;
	uint32_t num_free_buffers;
	uint32_t temp;

	/* Doorbell holds producer (low 16 bits) and consumer (high 16). */
	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);

	lb_producer_idx = temp & 0x0000ffff;
	lb_consumer_idx = (temp >> 16);

	if (lb_consumer_idx > lb_producer_idx)
		num_free_buffers = NUM_LARGE_BUFFERS -
		    (lb_consumer_idx - lb_producer_idx);
	else
		num_free_buffers = lb_producer_idx - lb_consumer_idx;

	/* Track the low-water mark of hardware-visible free buffers. */
	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
	}
#endif

#ifdef QLGE_LOAD_UNLOAD
	if (rx_ring->rx_indicate > 0xFF000000)
		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
		    "lbq_desc index %d",
		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
		    lbq_desc->index);
#endif
	if (alloc_memory) {
		/* Previous mblk was consumed by the stack; map a new one. */
		lbq_desc->mp =
		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
		if (lbq_desc->mp == NULL) {
			/* Not fatal here; buffer still returns to free list */
			rx_ring->rx_failed_lbq_allocs++;
		}
	}

	/* Got the packet from the stack decrement rx_indicate count */
	atomic_dec_32(&rx_ring->rx_indicate);

	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);

	/* Rearm if possible */
	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
	    (qlge->mac_flags == QL_MAC_STARTED)) {
		lbq_entry = rx_ring->lbq_dma.vaddr;
		lbq_entry += rx_ring->lbq_prod_idx;
		/* Move free buffers to the in-use list and publish them. */
		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
			/* Get first one from free list */
			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);

			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
			lbq_entry++;
			rx_ring->lbq_prod_idx++;
			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
				rx_ring->lbq_prod_idx = 0;
				lbq_entry = rx_ring->lbq_dma.vaddr;
			}

			/* Add to end of in use list */
			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
		}

		/* Update large buffer queue producer index */
		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
	}

	mutex_exit(&rx_ring->lbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s exitd, lbuf_free_count %d\n",
	    __func__, rx_ring->lbuf_free_count));
}
1358bafec742SSukumar Swaminathan /*
1359bafec742SSukumar Swaminathan  * rx recycle call back function
1360bafec742SSukumar Swaminathan  */
1361bafec742SSukumar Swaminathan static void
ql_release_to_lbuf_free_list(caddr_t p)1362bafec742SSukumar Swaminathan ql_release_to_lbuf_free_list(caddr_t p)
1363bafec742SSukumar Swaminathan {
1364bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1365bafec742SSukumar Swaminathan 
1366bafec742SSukumar Swaminathan 	if (lbq_desc == NULL)
1367bafec742SSukumar Swaminathan 		return;
1368bafec742SSukumar Swaminathan 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1369bafec742SSukumar Swaminathan }
1370bafec742SSukumar Swaminathan 
/*
 * free small buffer queue buffers
 *
 * Teardown path: frees any mblks still held on the circular free and
 * in-use lists, flags every descriptor so the recycle callback will not
 * allocate a replacement, releases each buffer's DMA memory, and
 * finally frees the free/in-use pointer-tracking arrays.
 */
static void
ql_free_sbq_buffers(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	uint32_t i;
	uint32_t j = rx_ring->sbq_free_head;
	int  force_cnt = 0;

	/* Walk the circular free list and release any attached mblks. */
	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
		sbq_desc = rx_ring->sbuf_free[j];
		sbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->sbq_len) {
			j = 0;
		}
		if (sbq_desc->mp != NULL) {
			freemsg(sbq_desc->mp);
			sbq_desc->mp = NULL;
		}
	}
	rx_ring->sbuf_free_count = 0;

	/* Same for the circular in-use list. */
	j = rx_ring->sbq_use_head;
	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
		sbq_desc = rx_ring->sbuf_in_use[j];
		sbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->sbq_len) {
			j = 0;
		}
		if (sbq_desc->mp != NULL) {
			freemsg(sbq_desc->mp);
			sbq_desc->mp = NULL;
		}
	}
	rx_ring->sbuf_in_use_count = 0;

	/* Flag every descriptor and release its DMA buffer. */
	sbq_desc = &rx_ring->sbq_desc[0];
	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
		/*
		 * Set flag so that the callback does not allocate a new buffer
		 */
		sbq_desc->free_buf = 1;
		/* Count buffers the stack has not yet returned. */
		if (sbq_desc->upl_inuse != 0) {
			force_cnt++;
		}
		if (sbq_desc->bd_dma.dma_handle != NULL) {
			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
			    &sbq_desc->bd_dma.acc_handle);
			sbq_desc->bd_dma.dma_handle = NULL;
			sbq_desc->bd_dma.acc_handle = NULL;
		}
	}
#ifdef QLGE_LOAD_UNLOAD
	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
#endif
	/* Release the descriptor pointer tracking arrays. */
	if (rx_ring->sbuf_in_use != NULL) {
		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->sbuf_in_use = NULL;
	}

	if (rx_ring->sbuf_free != NULL) {
		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->sbuf_free = NULL;
	}
}
1443bafec742SSukumar Swaminathan 
/*
 * Allocate small buffers
 *
 * For each entry in the small buffer queue: allocate DMA-able receive
 * memory, set up the recycle callback context, wrap the buffer in an
 * mblk via desballoc(), and place it on the free list.  On any failure
 * all partially allocated resources are released with
 * ql_free_sbq_buffers().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc;
	int i;
	ddi_dma_cookie_t dma_cookie;

	/* Reset circular list bookkeeping before rebuilding the lists. */
	rx_ring->sbq_use_head = 0;
	rx_ring->sbq_use_tail = 0;
	rx_ring->sbuf_in_use_count = 0;
	rx_ring->sbq_free_head = 0;
	rx_ring->sbq_free_tail = 0;
	rx_ring->sbuf_free_count = 0;
	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
	    sizeof (struct bq_desc *), KM_NOSLEEP);
	if (rx_ring->sbuf_free == NULL) {
		cmn_err(CE_WARN,
		    "!%s: sbuf_free_list alloc: failed",
		    __func__);
		goto alloc_sbuf_err;
	}

	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
	    sizeof (struct bq_desc *), KM_NOSLEEP);
	if (rx_ring->sbuf_in_use == NULL) {
		cmn_err(CE_WARN,
		    "!%s: sbuf_inuse_list alloc: failed",
		    __func__);
		goto alloc_sbuf_err;
	}

	sbq_desc = &rx_ring->sbq_desc[0];

	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
		/* Allocate buffer */
		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
		    &ql_buf_acc_attr,
		    DDI_DMA_READ | DDI_DMA_STREAMING,
		    &sbq_desc->bd_dma.acc_handle,
		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
		    (size_t)0,				/* default alignment */
		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
		    &dma_cookie) != 0) {
			cmn_err(CE_WARN,
			    "!%s: ddi_dma_alloc_handle: failed",
			    __func__);
			goto alloc_sbuf_err;
		}

		/* Set context for Return buffer callback */
		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
		sbq_desc->rx_ring = rx_ring;
		sbq_desc->upl_inuse = 0;
		sbq_desc->free_buf = 0;

		/* Wrap the DMA buffer in an mblk tied to the recycle hook. */
		sbq_desc->mp =
		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
		if (sbq_desc->mp == NULL) {
			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
			goto alloc_sbuf_err;
		}
		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
	}

	return (DDI_SUCCESS);

alloc_sbuf_err:
	/* Tear down whatever was built so far (handles partial state). */
	ql_free_sbq_buffers(rx_ring);
	return (DDI_FAILURE);
}
1518bafec742SSukumar Swaminathan 
/*
 * Free large buffer queue buffers.
 *
 * Teardown path: frees any mblks still held on the circular free and
 * in-use lists, flags every descriptor so the recycle callback will not
 * allocate a replacement, releases each buffer's DMA memory, and
 * finally frees the free/in-use pointer-tracking arrays.
 */
static void
ql_free_lbq_buffers(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;
	uint32_t i, j;
	int force_cnt = 0;

	/* Walk the circular free list and release any attached mblks. */
	j = rx_ring->lbq_free_head;
	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
		lbq_desc = rx_ring->lbuf_free[j];
		lbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->lbq_len)
			j = 0;
		if (lbq_desc->mp != NULL) {
			freemsg(lbq_desc->mp);
			lbq_desc->mp = NULL;
		}
	}
	rx_ring->lbuf_free_count = 0;

	/* Same for the circular in-use list. */
	j = rx_ring->lbq_use_head;
	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
		lbq_desc = rx_ring->lbuf_in_use[j];
		lbq_desc->free_buf = 1;
		j++;
		if (j >= rx_ring->lbq_len) {
			j = 0;
		}
		if (lbq_desc->mp != NULL) {
			freemsg(lbq_desc->mp);
			lbq_desc->mp = NULL;
		}
	}
	rx_ring->lbuf_in_use_count = 0;

	/* Flag every descriptor and release its DMA buffer. */
	lbq_desc = &rx_ring->lbq_desc[0];
	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
		/* Set flag so that callback will not allocate a new buffer */
		lbq_desc->free_buf = 1;
		/* Count buffers the stack has not yet returned. */
		if (lbq_desc->upl_inuse != 0) {
			force_cnt++;
		}
		if (lbq_desc->bd_dma.dma_handle != NULL) {
			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
			    &lbq_desc->bd_dma.acc_handle);
			lbq_desc->bd_dma.dma_handle = NULL;
			lbq_desc->bd_dma.acc_handle = NULL;
		}
	}
#ifdef QLGE_LOAD_UNLOAD
	if (force_cnt) {
		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
		    force_cnt);
	}
#endif
	/* Release the descriptor pointer tracking arrays. */
	if (rx_ring->lbuf_in_use != NULL) {
		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->lbuf_in_use = NULL;
	}

	if (rx_ring->lbuf_free != NULL) {
		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
		    sizeof (struct bq_desc *)));
		rx_ring->lbuf_free = NULL;
	}
}
1588bafec742SSukumar Swaminathan 
1589bafec742SSukumar Swaminathan /* Allocate large buffers */
1590bafec742SSukumar Swaminathan static int
ql_alloc_lbufs(qlge_t * qlge,struct rx_ring * rx_ring)1591bafec742SSukumar Swaminathan ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1592bafec742SSukumar Swaminathan {
1593bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1594bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
1595bafec742SSukumar Swaminathan 	int i;
1596bafec742SSukumar Swaminathan 	uint32_t lbq_buf_size;
1597bafec742SSukumar Swaminathan 
1598cddcb3daSSukumar Swaminathan 	rx_ring->lbq_use_head = 0;
1599cddcb3daSSukumar Swaminathan 	rx_ring->lbq_use_tail = 0;
1600cddcb3daSSukumar Swaminathan 	rx_ring->lbuf_in_use_count = 0;
1601cddcb3daSSukumar Swaminathan 	rx_ring->lbq_free_head = 0;
1602cddcb3daSSukumar Swaminathan 	rx_ring->lbq_free_tail = 0;
1603cddcb3daSSukumar Swaminathan 	rx_ring->lbuf_free_count = 0;
1604bafec742SSukumar Swaminathan 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1605bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1606bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free == NULL) {
1607bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1608bafec742SSukumar Swaminathan 		    "!%s: lbuf_free_list alloc: failed",
1609bafec742SSukumar Swaminathan 		    __func__);
1610bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1611bafec742SSukumar Swaminathan 	}
1612bafec742SSukumar Swaminathan 
1613bafec742SSukumar Swaminathan 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1614bafec742SSukumar Swaminathan 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1615bafec742SSukumar Swaminathan 
1616bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_in_use == NULL) {
1617bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
1618bafec742SSukumar Swaminathan 		    "!%s: lbuf_inuse_list alloc: failed",
1619bafec742SSukumar Swaminathan 		    __func__);
1620bafec742SSukumar Swaminathan 		goto alloc_lbuf_err;
1621bafec742SSukumar Swaminathan 	}
1622bafec742SSukumar Swaminathan 
1623bafec742SSukumar Swaminathan 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1624accf27a5SSukumar Swaminathan 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1625bafec742SSukumar Swaminathan 
1626bafec742SSukumar Swaminathan 	lbq_desc = &rx_ring->lbq_desc[0];
1627bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1628bafec742SSukumar Swaminathan 		rx_ring->lbq_buf_size = lbq_buf_size;
1629bafec742SSukumar Swaminathan 		/* Allocate buffer */
1630accf27a5SSukumar Swaminathan 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1631bafec742SSukumar Swaminathan 		    &ql_buf_acc_attr,
1632bafec742SSukumar Swaminathan 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1633bafec742SSukumar Swaminathan 		    &lbq_desc->bd_dma.acc_handle,
1634bafec742SSukumar Swaminathan 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1635bafec742SSukumar Swaminathan 		    (size_t)0, /* default alignment */
1636bafec742SSukumar Swaminathan 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1637bafec742SSukumar Swaminathan 		    &dma_cookie) != 0) {
1638bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
1639bafec742SSukumar Swaminathan 			    "!%s: ddi_dma_alloc_handle: failed",
1640bafec742SSukumar Swaminathan 			    __func__);
1641bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1642bafec742SSukumar Swaminathan 		}
1643bafec742SSukumar Swaminathan 
1644bafec742SSukumar Swaminathan 		/* Set context for Return buffer callback */
1645bafec742SSukumar Swaminathan 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1646bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1647bafec742SSukumar Swaminathan 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1648bafec742SSukumar Swaminathan 		lbq_desc->rx_ring = rx_ring;
1649bafec742SSukumar Swaminathan 		lbq_desc->upl_inuse = 0;
1650bafec742SSukumar Swaminathan 		lbq_desc->free_buf = 0;
1651bafec742SSukumar Swaminathan 
1652bafec742SSukumar Swaminathan 		lbq_desc->mp =
1653bafec742SSukumar Swaminathan 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1654bafec742SSukumar Swaminathan 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1655bafec742SSukumar Swaminathan 		if (lbq_desc->mp == NULL) {
1656bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1657bafec742SSukumar Swaminathan 			goto alloc_lbuf_err;
1658bafec742SSukumar Swaminathan 		}
1659bafec742SSukumar Swaminathan 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1660bafec742SSukumar Swaminathan 	} /* For all large buffers */
1661bafec742SSukumar Swaminathan 
1662bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1663bafec742SSukumar Swaminathan 
1664bafec742SSukumar Swaminathan alloc_lbuf_err:
1665bafec742SSukumar Swaminathan 	ql_free_lbq_buffers(rx_ring);
1666bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1667bafec742SSukumar Swaminathan }
1668bafec742SSukumar Swaminathan 
1669bafec742SSukumar Swaminathan /*
1670bafec742SSukumar Swaminathan  * Free rx buffers
1671bafec742SSukumar Swaminathan  */
1672bafec742SSukumar Swaminathan static void
ql_free_rx_buffers(qlge_t * qlge)1673bafec742SSukumar Swaminathan ql_free_rx_buffers(qlge_t *qlge)
1674bafec742SSukumar Swaminathan {
1675bafec742SSukumar Swaminathan 	int i;
1676bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1677bafec742SSukumar Swaminathan 
1678bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1679bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1680bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1681bafec742SSukumar Swaminathan 			ql_free_lbq_buffers(rx_ring);
1682bafec742SSukumar Swaminathan 			ql_free_sbq_buffers(rx_ring);
1683bafec742SSukumar Swaminathan 		}
1684bafec742SSukumar Swaminathan 	}
1685bafec742SSukumar Swaminathan }
1686bafec742SSukumar Swaminathan 
1687bafec742SSukumar Swaminathan /*
1688bafec742SSukumar Swaminathan  * Allocate rx buffers
1689bafec742SSukumar Swaminathan  */
1690bafec742SSukumar Swaminathan static int
ql_alloc_rx_buffers(qlge_t * qlge)1691bafec742SSukumar Swaminathan ql_alloc_rx_buffers(qlge_t *qlge)
1692bafec742SSukumar Swaminathan {
1693bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
1694bafec742SSukumar Swaminathan 	int i;
1695bafec742SSukumar Swaminathan 
1696bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1697bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
1698bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
1699bafec742SSukumar Swaminathan 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1700bafec742SSukumar Swaminathan 				goto alloc_err;
1701bafec742SSukumar Swaminathan 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1702bafec742SSukumar Swaminathan 				goto alloc_err;
1703bafec742SSukumar Swaminathan 		}
1704bafec742SSukumar Swaminathan 	}
1705bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
1706bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
1707bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].type == RX_Q) {
1708bafec742SSukumar Swaminathan 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1709bafec742SSukumar Swaminathan 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1710bafec742SSukumar Swaminathan 		}
1711bafec742SSukumar Swaminathan 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1712bafec742SSukumar Swaminathan 	}
1713bafec742SSukumar Swaminathan #endif
1714bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
1715bafec742SSukumar Swaminathan 
1716bafec742SSukumar Swaminathan alloc_err:
1717cddcb3daSSukumar Swaminathan 	ql_free_rx_buffers(qlge);
1718bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
1719bafec742SSukumar Swaminathan }
1720bafec742SSukumar Swaminathan 
1721bafec742SSukumar Swaminathan /*
1722bafec742SSukumar Swaminathan  * Initialize large buffer queue ring
1723bafec742SSukumar Swaminathan  */
1724bafec742SSukumar Swaminathan static void
ql_init_lbq_ring(struct rx_ring * rx_ring)1725bafec742SSukumar Swaminathan ql_init_lbq_ring(struct rx_ring *rx_ring)
1726bafec742SSukumar Swaminathan {
1727bafec742SSukumar Swaminathan 	uint16_t i;
1728bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1729bafec742SSukumar Swaminathan 
1730bafec742SSukumar Swaminathan 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1731bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->lbq_len; i++) {
1732bafec742SSukumar Swaminathan 		lbq_desc = &rx_ring->lbq_desc[i];
1733bafec742SSukumar Swaminathan 		lbq_desc->index = i;
1734bafec742SSukumar Swaminathan 	}
1735bafec742SSukumar Swaminathan }
1736bafec742SSukumar Swaminathan 
1737bafec742SSukumar Swaminathan /*
1738bafec742SSukumar Swaminathan  * Initialize small buffer queue ring
1739bafec742SSukumar Swaminathan  */
1740bafec742SSukumar Swaminathan static void
ql_init_sbq_ring(struct rx_ring * rx_ring)1741bafec742SSukumar Swaminathan ql_init_sbq_ring(struct rx_ring *rx_ring)
1742bafec742SSukumar Swaminathan {
1743bafec742SSukumar Swaminathan 	uint16_t i;
1744bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1745bafec742SSukumar Swaminathan 
1746bafec742SSukumar Swaminathan 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1747bafec742SSukumar Swaminathan 	for (i = 0; i < rx_ring->sbq_len; i++) {
1748bafec742SSukumar Swaminathan 		sbq_desc = &rx_ring->sbq_desc[i];
1749bafec742SSukumar Swaminathan 		sbq_desc->index = i;
1750bafec742SSukumar Swaminathan 	}
1751bafec742SSukumar Swaminathan }
1752bafec742SSukumar Swaminathan 
1753bafec742SSukumar Swaminathan /*
1754bafec742SSukumar Swaminathan  * Calculate the pseudo-header checksum if hardware can not do
1755bafec742SSukumar Swaminathan  */
1756bafec742SSukumar Swaminathan static void
ql_pseudo_cksum(uint8_t * buf)1757bafec742SSukumar Swaminathan ql_pseudo_cksum(uint8_t *buf)
1758bafec742SSukumar Swaminathan {
1759bafec742SSukumar Swaminathan 	uint32_t cksum;
1760bafec742SSukumar Swaminathan 	uint16_t iphl;
1761bafec742SSukumar Swaminathan 	uint16_t proto;
1762bafec742SSukumar Swaminathan 
1763bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1764bafec742SSukumar Swaminathan 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1765bafec742SSukumar Swaminathan 	cksum += proto = buf[9];
1766bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1767bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1768bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1769bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1770bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1771bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1772bafec742SSukumar Swaminathan 
1773bafec742SSukumar Swaminathan 	/*
1774bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
1775bafec742SSukumar Swaminathan 	 * update the checksum field.
1776bafec742SSukumar Swaminathan 	 */
1777bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
1778bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1779bafec742SSukumar Swaminathan 
1780bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1781bafec742SSukumar Swaminathan 
1782bafec742SSukumar Swaminathan }
1783bafec742SSukumar Swaminathan 
1784bafec742SSukumar Swaminathan /*
1785bafec742SSukumar Swaminathan  * Transmit an incoming packet.
1786bafec742SSukumar Swaminathan  */
mblk_t *
ql_ring_tx(void *arg, mblk_t *mp)
{
	/*
	 * Per-ring transmit entry point; arg is the tx ring.  Sends as much
	 * of the mblk chain as resources allow and returns the unsent
	 * remainder to the caller (NULL when everything was queued).
	 */
	struct tx_ring *tx_ring = (struct tx_ring *)arg;
	qlge_t *qlge = tx_ring->qlge;
	mblk_t *next;
	int rval;
	uint32_t tx_count = 0;	/* packets queued this call; gates doorbell */

	if (qlge->port_link_state == LS_DOWN) {
		/* can not send message while link is down */
		mblk_t *tp;

		/* Drop the entire chain, unlinking each mblk first. */
		while (mp != NULL) {
			tp = mp->b_next;
			mp->b_next = NULL;
			freemsg(mp);
			mp = tp;
		}
		goto exit;
	}

	mutex_enter(&tx_ring->tx_lock);
	/* if mac is not started, driver is not ready, can not send */
	if (tx_ring->mac_flags != QL_MAC_STARTED) {
		cmn_err(CE_WARN, "%s(%d)ring not started, mode %d "
		    " return packets",
		    __func__, qlge->instance, tx_ring->mac_flags);
		mutex_exit(&tx_ring->tx_lock);
		/* chain is returned untouched to the caller */
		goto exit;
	}

	/* we must try to send all */
	while (mp != NULL) {
		/*
		 * if number of available slots is less than a threshold,
		 * then quit
		 */
		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
			tx_ring->queue_stopped = 1;
			rval = DDI_FAILURE;
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_WARN, "%s(%d) no resources",
			    __func__, qlge->instance);
#endif
			tx_ring->defer++;
			/*
			 * If we return the buffer back we are expected to call
			 * mac_tx_ring_update() when resources are available
			 */
			break;
		}

		/* Unlink this mblk before handing it down. */
		next = mp->b_next;
		mp->b_next = NULL;

		rval = ql_send_common(tx_ring, mp);

		if (rval != DDI_SUCCESS) {
			/* re-link so the unsent tail is returned intact */
			mp->b_next = next;
			break;
		}
		tx_count++;
		mp = next;
	}

	/*
	 * After all msg blocks are mapped or copied to tx buffer,
	 * trigger the hardware to send!
	 */
	if (tx_count > 0) {
		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
		    tx_ring->prod_idx);
	}

	mutex_exit(&tx_ring->tx_lock);
exit:
	return (mp);
}
1866bafec742SSukumar Swaminathan 
1867bafec742SSukumar Swaminathan 
1868bafec742SSukumar Swaminathan /*
1869bafec742SSukumar Swaminathan  * This function builds an mblk list for the given inbound
1870bafec742SSukumar Swaminathan  * completion.
1871bafec742SSukumar Swaminathan  */
1872bafec742SSukumar Swaminathan 
1873bafec742SSukumar Swaminathan static mblk_t *
ql_build_rx_mp(qlge_t * qlge,struct rx_ring * rx_ring,struct ib_mac_iocb_rsp * ib_mac_rsp)1874bafec742SSukumar Swaminathan ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1875bafec742SSukumar Swaminathan     struct ib_mac_iocb_rsp *ib_mac_rsp)
1876bafec742SSukumar Swaminathan {
1877bafec742SSukumar Swaminathan 	mblk_t *mp = NULL;
1878bafec742SSukumar Swaminathan 	mblk_t *mp1 = NULL;	/* packet header */
1879bafec742SSukumar Swaminathan 	mblk_t *mp2 = NULL;	/* packet content */
1880bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
1881bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
1882bafec742SSukumar Swaminathan 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1883bafec742SSukumar Swaminathan 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1884bafec742SSukumar Swaminathan 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1885bafec742SSukumar Swaminathan 	uint32_t pkt_len = payload_len + header_len;
1886bafec742SSukumar Swaminathan 	uint32_t done;
1887bafec742SSukumar Swaminathan 	uint64_t *curr_ial_ptr;
1888bafec742SSukumar Swaminathan 	uint32_t ial_data_addr_low;
1889bafec742SSukumar Swaminathan 	uint32_t actual_data_addr_low;
1890bafec742SSukumar Swaminathan 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1891bafec742SSukumar Swaminathan 	uint32_t size;
1892a6766df4SSukumar Swaminathan 	uint32_t cp_offset;
1893a6766df4SSukumar Swaminathan 	boolean_t rx_copy = B_FALSE;
1894a6766df4SSukumar Swaminathan 	mblk_t *tp = NULL;
1895bafec742SSukumar Swaminathan 
1896bafec742SSukumar Swaminathan 	/*
1897bafec742SSukumar Swaminathan 	 * Check if error flags are set
1898bafec742SSukumar Swaminathan 	 */
1899bafec742SSukumar Swaminathan 	if (err_flag != 0) {
1900bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1901bafec742SSukumar Swaminathan 			rx_ring->frame_too_long++;
1902bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1903bafec742SSukumar Swaminathan 			rx_ring->frame_too_short++;
1904bafec742SSukumar Swaminathan 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1905bafec742SSukumar Swaminathan 			rx_ring->fcs_err++;
1906bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1907bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1908bafec742SSukumar Swaminathan #endif
1909bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1910bafec742SSukumar Swaminathan 		    (uint8_t *)ib_mac_rsp, 8,
1911bafec742SSukumar Swaminathan 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1912bafec742SSukumar Swaminathan 	}
1913bafec742SSukumar Swaminathan 
1914bafec742SSukumar Swaminathan 	/* header should not be in large buffer */
1915bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1916bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1917bafec742SSukumar Swaminathan 		err_flag |= 1;
1918bafec742SSukumar Swaminathan 	}
	/* if the whole packet is bigger than the rx buffer size */
1920accf27a5SSukumar Swaminathan 	if (pkt_len > qlge->max_frame_size) {
1921accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "ql_build_rx_mpframe too long(%d)!", pkt_len);
1922accf27a5SSukumar Swaminathan 		err_flag |= 1;
1923accf27a5SSukumar Swaminathan 	}
1924a6766df4SSukumar Swaminathan 	if (qlge->rx_copy ||
1925a6766df4SSukumar Swaminathan 	    (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1926a6766df4SSukumar Swaminathan 	    (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1927a6766df4SSukumar Swaminathan 		rx_copy = B_TRUE;
1928a6766df4SSukumar Swaminathan 	}
1929accf27a5SSukumar Swaminathan 
1930a6766df4SSukumar Swaminathan 	/* if using rx copy mode, we need to allocate a big enough buffer */
1931a6766df4SSukumar Swaminathan 	if (rx_copy) {
1932a6766df4SSukumar Swaminathan 		qlge->stats.norcvbuf++;
1933a6766df4SSukumar Swaminathan 		tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1934a6766df4SSukumar Swaminathan 		    BPRI_MED);
1935a6766df4SSukumar Swaminathan 		if (tp == NULL) {
1936a6766df4SSukumar Swaminathan 			cmn_err(CE_WARN, "rx copy failed to allocate memory");
1937a6766df4SSukumar Swaminathan 		} else {
1938a6766df4SSukumar Swaminathan 			tp->b_rptr += qlge->ip_hdr_offset;
1939a6766df4SSukumar Swaminathan 		}
1940a6766df4SSukumar Swaminathan 	}
1941bafec742SSukumar Swaminathan 	/*
1942bafec742SSukumar Swaminathan 	 * Handle the header buffer if present.
1943bafec742SSukumar Swaminathan 	 * packet header must be valid and saved in one small buffer
 * broadcast/multicast packets' headers are not split
1945bafec742SSukumar Swaminathan 	 */
1946bafec742SSukumar Swaminathan 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1947bafec742SSukumar Swaminathan 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1948bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1949bafec742SSukumar Swaminathan 		    header_len));
1950bafec742SSukumar Swaminathan 		/* Sync access */
1951bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1952bafec742SSukumar Swaminathan 
1953bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
1954bafec742SSukumar Swaminathan 
1955bafec742SSukumar Swaminathan 		/*
1956bafec742SSukumar Swaminathan 		 * Validate addresses from the ASIC with the
1957bafec742SSukumar Swaminathan 		 * expected sbuf address
1958bafec742SSukumar Swaminathan 		 */
1959bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1960bafec742SSukumar Swaminathan 		    != ib_mac_rsp->hdr_addr) {
1961bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
1962bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1963bafec742SSukumar Swaminathan 			    " in wrong small buffer",
1964bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
1965accf27a5SSukumar Swaminathan 			goto fatal_error;
1966bafec742SSukumar Swaminathan 		}
1967bafec742SSukumar Swaminathan 		/* get this packet */
1968bafec742SSukumar Swaminathan 		mp1 = sbq_desc->mp;
1969a6766df4SSukumar Swaminathan 		/* Flush DMA'd data */
1970a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1971a6766df4SSukumar Swaminathan 		    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1972a6766df4SSukumar Swaminathan 
1973bafec742SSukumar Swaminathan 		if ((err_flag != 0)|| (mp1 == NULL)) {
1974bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
1975bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
1976bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "get header from small buffer fail");
1977bafec742SSukumar Swaminathan #endif
1978bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1979bafec742SSukumar Swaminathan 			mp1 = NULL;
1980a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
1981a6766df4SSukumar Swaminathan 			if (tp != NULL) {
1982a6766df4SSukumar Swaminathan 				bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1983a6766df4SSukumar Swaminathan 				    header_len);
1984a6766df4SSukumar Swaminathan 			}
1985a6766df4SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1986a6766df4SSukumar Swaminathan 			mp1 = NULL;
1987bafec742SSukumar Swaminathan 		} else {
1988bafec742SSukumar Swaminathan 			if ((qlge->ip_hdr_offset != 0)&&
1989bafec742SSukumar Swaminathan 			    (header_len < SMALL_BUFFER_SIZE)) {
1990bafec742SSukumar Swaminathan 				/*
1991bafec742SSukumar Swaminathan 				 * copy entire header to a 2 bytes boundary
1992bafec742SSukumar Swaminathan 				 * address for 8100 adapters so that the IP
1993bafec742SSukumar Swaminathan 				 * header can be on a 4 byte boundary address
1994bafec742SSukumar Swaminathan 				 */
1995bafec742SSukumar Swaminathan 				bcopy(mp1->b_rptr,
1996bafec742SSukumar Swaminathan 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1997bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset),
1998bafec742SSukumar Swaminathan 				    header_len);
1999bafec742SSukumar Swaminathan 				mp1->b_rptr += SMALL_BUFFER_SIZE +
2000bafec742SSukumar Swaminathan 				    qlge->ip_hdr_offset;
2001bafec742SSukumar Swaminathan 			}
2002bafec742SSukumar Swaminathan 
2003bafec742SSukumar Swaminathan 			/*
2004bafec742SSukumar Swaminathan 			 * Adjust the mp payload_len to match
2005bafec742SSukumar Swaminathan 			 * the packet header payload_len
2006bafec742SSukumar Swaminathan 			 */
2007bafec742SSukumar Swaminathan 			mp1->b_wptr = mp1->b_rptr + header_len;
2008bafec742SSukumar Swaminathan 			mp1->b_next = mp1->b_cont = NULL;
2009bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2010bafec742SSukumar Swaminathan 			    (uint8_t *)mp1->b_rptr, 8, header_len);
2011bafec742SSukumar Swaminathan 		}
2012bafec742SSukumar Swaminathan 	}
2013bafec742SSukumar Swaminathan 
2014bafec742SSukumar Swaminathan 	/*
2015bafec742SSukumar Swaminathan 	 * packet data or whole packet can be in small or one or
2016bafec742SSukumar Swaminathan 	 * several large buffer(s)
2017bafec742SSukumar Swaminathan 	 */
2018bafec742SSukumar Swaminathan 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2019bafec742SSukumar Swaminathan 		/*
2020bafec742SSukumar Swaminathan 		 * The data is in a single small buffer.
2021bafec742SSukumar Swaminathan 		 */
2022bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2023bafec742SSukumar Swaminathan 
2024bafec742SSukumar Swaminathan 		ASSERT(sbq_desc != NULL);
2025bafec742SSukumar Swaminathan 
2026bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2027bafec742SSukumar Swaminathan 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
2028bafec742SSukumar Swaminathan 		    "sbq_desc->bd_dma.dma_addr = %x,"
2029bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2030bafec742SSukumar Swaminathan 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2031bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2032bafec742SSukumar Swaminathan 
2033bafec742SSukumar Swaminathan 		/*
2034bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with the
2035bafec742SSukumar Swaminathan 		 * expected sbuf address
2036bafec742SSukumar Swaminathan 		 */
2037bafec742SSukumar Swaminathan 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2038bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
2039bafec742SSukumar Swaminathan 			/* Small buffer address mismatch */
2040bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2041bafec742SSukumar Swaminathan 			    " in wrong small buffer",
2042bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
2043accf27a5SSukumar Swaminathan 			goto fatal_error;
2044bafec742SSukumar Swaminathan 		}
2045bafec742SSukumar Swaminathan 		/* get this packet */
2046bafec742SSukumar Swaminathan 		mp2 = sbq_desc->mp;
2047a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2048a6766df4SSukumar Swaminathan 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2049bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
2050bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2051bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2052bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2053bafec742SSukumar Swaminathan #endif
2054bafec742SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2055bafec742SSukumar Swaminathan 			mp2 = NULL;
2056a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
2057a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2058a6766df4SSukumar Swaminathan 				bcopy(sbq_desc->bd_dma.vaddr,
2059a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len, payload_len);
2060a6766df4SSukumar Swaminathan 				tp->b_wptr =
2061a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len + payload_len;
2062a6766df4SSukumar Swaminathan 			}
2063a6766df4SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2064a6766df4SSukumar Swaminathan 			mp2 = NULL;
2065bafec742SSukumar Swaminathan 		} else {
2066bafec742SSukumar Swaminathan 			/* Adjust the buffer length to match the payload_len */
2067bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
2068bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
2069bafec742SSukumar Swaminathan 			/* Flush DMA'd data */
2070bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2071bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2072bafec742SSukumar Swaminathan 			/*
2073bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
2074bafec742SSukumar Swaminathan 			 * the end of packet header
2075bafec742SSukumar Swaminathan 			 */
2076bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
2077bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
2078bafec742SSukumar Swaminathan 			    (pkt_len <
2079bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2080bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2081bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2082bafec742SSukumar Swaminathan 				freemsg(mp2);
2083bafec742SSukumar Swaminathan 				mp2 = NULL;
2084bafec742SSukumar Swaminathan 			}
2085bafec742SSukumar Swaminathan 		}
2086bafec742SSukumar Swaminathan 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087bafec742SSukumar Swaminathan 		/*
2088bafec742SSukumar Swaminathan 		 * The data is in a single large buffer.
2089bafec742SSukumar Swaminathan 		 */
2090bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2091bafec742SSukumar Swaminathan 
2092bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2093bafec742SSukumar Swaminathan 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
2094bafec742SSukumar Swaminathan 		    "lbq_desc->bd_dma.dma_addr = %x,"
2095bafec742SSukumar Swaminathan 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2096bafec742SSukumar Swaminathan 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2097bafec742SSukumar Swaminathan 		    ib_mac_rsp->data_addr, lbq_desc->mp));
2098bafec742SSukumar Swaminathan 
2099bafec742SSukumar Swaminathan 		ASSERT(lbq_desc != NULL);
2100bafec742SSukumar Swaminathan 
2101bafec742SSukumar Swaminathan 		/*
2102bafec742SSukumar Swaminathan 		 * Validate  addresses from the ASIC with
2103bafec742SSukumar Swaminathan 		 * the expected lbuf address
2104bafec742SSukumar Swaminathan 		 */
2105bafec742SSukumar Swaminathan 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2106bafec742SSukumar Swaminathan 		    != ib_mac_rsp->data_addr) {
2107bafec742SSukumar Swaminathan 			/* Large buffer address mismatch */
2108bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2109bafec742SSukumar Swaminathan 			    " in wrong large buffer",
2110bafec742SSukumar Swaminathan 			    __func__, qlge->instance, rx_ring->cq_id);
2111accf27a5SSukumar Swaminathan 			goto fatal_error;
2112bafec742SSukumar Swaminathan 		}
2113bafec742SSukumar Swaminathan 		mp2 = lbq_desc->mp;
2114a6766df4SSukumar Swaminathan 		/* Flush DMA'd data */
2115a6766df4SSukumar Swaminathan 		(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2116a6766df4SSukumar Swaminathan 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2117bafec742SSukumar Swaminathan 		if ((err_flag != 0) || (mp2 == NULL)) {
2118bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2119bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2120bafec742SSukumar Swaminathan #endif
2121bafec742SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2122bafec742SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2123bafec742SSukumar Swaminathan 			mp2 = NULL;
2124a6766df4SSukumar Swaminathan 		} else if (rx_copy) {
2125a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2126a6766df4SSukumar Swaminathan 				bcopy(lbq_desc->bd_dma.vaddr,
2127a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len, payload_len);
2128a6766df4SSukumar Swaminathan 				tp->b_wptr =
2129a6766df4SSukumar Swaminathan 				    tp->b_rptr + header_len + payload_len;
2130a6766df4SSukumar Swaminathan 			}
2131a6766df4SSukumar Swaminathan 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2132a6766df4SSukumar Swaminathan 			mp2 = NULL;
2133bafec742SSukumar Swaminathan 		} else {
2134bafec742SSukumar Swaminathan 			/*
2135bafec742SSukumar Swaminathan 			 * Adjust the buffer length to match
2136bafec742SSukumar Swaminathan 			 * the packet payload_len
2137bafec742SSukumar Swaminathan 			 */
2138bafec742SSukumar Swaminathan 			mp2->b_wptr = mp2->b_rptr + payload_len;
2139bafec742SSukumar Swaminathan 			mp2->b_next = mp2->b_cont = NULL;
2140bafec742SSukumar Swaminathan 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2141bafec742SSukumar Swaminathan 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2142bafec742SSukumar Swaminathan 			/*
2143bafec742SSukumar Swaminathan 			 * if payload is too small , copy to
2144bafec742SSukumar Swaminathan 			 * the end of packet header
2145bafec742SSukumar Swaminathan 			 */
2146bafec742SSukumar Swaminathan 			if ((mp1 != NULL) &&
2147bafec742SSukumar Swaminathan 			    (payload_len <= qlge->payload_copy_thresh) &&
2148bafec742SSukumar Swaminathan 			    (pkt_len<
2149bafec742SSukumar Swaminathan 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2150bafec742SSukumar Swaminathan 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2151bafec742SSukumar Swaminathan 				mp1->b_wptr += payload_len;
2152bafec742SSukumar Swaminathan 				freemsg(mp2);
2153bafec742SSukumar Swaminathan 				mp2 = NULL;
2154bafec742SSukumar Swaminathan 			}
2155bafec742SSukumar Swaminathan 		}
2156a6766df4SSukumar Swaminathan 	} else if (payload_len) { /* ial case */
2157bafec742SSukumar Swaminathan 		/*
2158bafec742SSukumar Swaminathan 		 * payload available but not in sml nor lrg buffer,
2159bafec742SSukumar Swaminathan 		 * so, it is saved in IAL
2160bafec742SSukumar Swaminathan 		 */
2161bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2162bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2163bafec742SSukumar Swaminathan #endif
2164bafec742SSukumar Swaminathan 		/* lrg buf addresses are saved in one small buffer */
2165bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2166bafec742SSukumar Swaminathan 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2167bafec742SSukumar Swaminathan 		done = 0;
2168a6766df4SSukumar Swaminathan 		cp_offset = 0;
2169a6766df4SSukumar Swaminathan 
2170bafec742SSukumar Swaminathan 		while (!done) {
2171bafec742SSukumar Swaminathan 			ial_data_addr_low =
2172bafec742SSukumar Swaminathan 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2173bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2174bafec742SSukumar Swaminathan 			/* check if this is the last packet fragment */
2175bafec742SSukumar Swaminathan 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2176bafec742SSukumar Swaminathan 			curr_ial_ptr++;
2177bafec742SSukumar Swaminathan 			/*
2178bafec742SSukumar Swaminathan 			 * The data is in one or several large buffer(s).
2179bafec742SSukumar Swaminathan 			 */
2180bafec742SSukumar Swaminathan 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2181bafec742SSukumar Swaminathan 			actual_data_addr_low =
2182bafec742SSukumar Swaminathan 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2183bafec742SSukumar Swaminathan 			    0xFFFFFFFE);
2184bafec742SSukumar Swaminathan 			if (ial_data_addr_low != actual_data_addr_low) {
2185bafec742SSukumar Swaminathan 				cmn_err(CE_WARN,
2186bafec742SSukumar Swaminathan 				    "packet saved in wrong ial lrg buffer"
2187bafec742SSukumar Swaminathan 				    " expected %x, actual %lx",
2188bafec742SSukumar Swaminathan 				    ial_data_addr_low,
2189bafec742SSukumar Swaminathan 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2190accf27a5SSukumar Swaminathan 				goto fatal_error;
2191bafec742SSukumar Swaminathan 			}
2192bafec742SSukumar Swaminathan 
2193bafec742SSukumar Swaminathan 			size = (payload_len < rx_ring->lbq_buf_size)?
2194bafec742SSukumar Swaminathan 			    payload_len : rx_ring->lbq_buf_size;
2195bafec742SSukumar Swaminathan 			payload_len -= size;
2196accf27a5SSukumar Swaminathan 			mp2 = lbq_desc->mp;
2197accf27a5SSukumar Swaminathan 			if ((err_flag != 0) || (mp2 == NULL)) {
2198accf27a5SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2199accf27a5SSukumar Swaminathan 				cmn_err(CE_WARN,
2200accf27a5SSukumar Swaminathan 				    "ignore bad data from large buffer");
2201accf27a5SSukumar Swaminathan #endif
2202accf27a5SSukumar Swaminathan 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2203accf27a5SSukumar Swaminathan 				mp2 = NULL;
2204a6766df4SSukumar Swaminathan 			} else if (rx_copy) {
2205a6766df4SSukumar Swaminathan 				if (tp != NULL) {
2206a6766df4SSukumar Swaminathan 					(void) ddi_dma_sync(
2207a6766df4SSukumar Swaminathan 					    lbq_desc->bd_dma.dma_handle,
2208a6766df4SSukumar Swaminathan 					    0, size, DDI_DMA_SYNC_FORKERNEL);
2209a6766df4SSukumar Swaminathan 					bcopy(lbq_desc->bd_dma.vaddr,
2210a6766df4SSukumar Swaminathan 					    tp->b_rptr + header_len + cp_offset,
2211a6766df4SSukumar Swaminathan 					    size);
2212a6766df4SSukumar Swaminathan 					tp->b_wptr =
2213a6766df4SSukumar Swaminathan 					    tp->b_rptr + size + cp_offset +
2214a6766df4SSukumar Swaminathan 					    header_len;
2215a6766df4SSukumar Swaminathan 					cp_offset += size;
2216a6766df4SSukumar Swaminathan 				}
2217a6766df4SSukumar Swaminathan 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2218a6766df4SSukumar Swaminathan 				mp2 = NULL;
2219accf27a5SSukumar Swaminathan 			} else {
2220accf27a5SSukumar Swaminathan 				if (mp_ial == NULL) {
2221accf27a5SSukumar Swaminathan 					mp_ial = mp2;
2222accf27a5SSukumar Swaminathan 				} else {
2223accf27a5SSukumar Swaminathan 					linkb(mp_ial, mp2);
2224accf27a5SSukumar Swaminathan 				}
2225accf27a5SSukumar Swaminathan 
2226accf27a5SSukumar Swaminathan 				mp2->b_next = NULL;
2227accf27a5SSukumar Swaminathan 				mp2->b_cont = NULL;
2228accf27a5SSukumar Swaminathan 				mp2->b_wptr = mp2->b_rptr + size;
2229accf27a5SSukumar Swaminathan 				/* Flush DMA'd data */
2230accf27a5SSukumar Swaminathan 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2231accf27a5SSukumar Swaminathan 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2232accf27a5SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2233accf27a5SSukumar Swaminathan 				    size));
2234accf27a5SSukumar Swaminathan 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2235accf27a5SSukumar Swaminathan 				    (uint8_t *)mp2->b_rptr, 8, size);
2236accf27a5SSukumar Swaminathan 			}
2237accf27a5SSukumar Swaminathan 		}
2238accf27a5SSukumar Swaminathan 		if (err_flag != 0) {
2239accf27a5SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2240accf27a5SSukumar Swaminathan 			/* failed on this packet, put it back for re-arming */
2241accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2242accf27a5SSukumar Swaminathan #endif
2243accf27a5SSukumar Swaminathan 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2244accf27a5SSukumar Swaminathan 		} else {
2245accf27a5SSukumar Swaminathan 			mp2 = mp_ial;
2246accf27a5SSukumar Swaminathan 			freemsg(sbq_desc->mp);
2247bafec742SSukumar Swaminathan 		}
2248bafec742SSukumar Swaminathan 	}
2249bafec742SSukumar Swaminathan 	/*
2250bafec742SSukumar Swaminathan 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2251bafec742SSukumar Swaminathan 	 * concatenate message block mp2 to the tail of message header, mp1
2252bafec742SSukumar Swaminathan 	 */
2253bafec742SSukumar Swaminathan 	if (!err_flag) {
2254a6766df4SSukumar Swaminathan 		if (rx_copy) {
2255a6766df4SSukumar Swaminathan 			if (tp != NULL) {
2256a6766df4SSukumar Swaminathan 				tp->b_next = NULL;
2257a6766df4SSukumar Swaminathan 				tp->b_cont = NULL;
2258a6766df4SSukumar Swaminathan 				tp->b_wptr = tp->b_rptr +
2259a6766df4SSukumar Swaminathan 				    header_len + payload_len;
2260a6766df4SSukumar Swaminathan 			}
2261a6766df4SSukumar Swaminathan 			mp = tp;
2262a6766df4SSukumar Swaminathan 		} else {
2263a6766df4SSukumar Swaminathan 			if (mp1) {
2264a6766df4SSukumar Swaminathan 				if (mp2) {
2265a6766df4SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2266a6766df4SSukumar Swaminathan 					    ("packet in mp1 and mp2\n"));
2267a6766df4SSukumar Swaminathan 					/* mp1->b_cont = mp2; */
2268a6766df4SSukumar Swaminathan 					linkb(mp1, mp2);
2269a6766df4SSukumar Swaminathan 					mp = mp1;
2270a6766df4SSukumar Swaminathan 				} else {
2271a6766df4SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2272a6766df4SSukumar Swaminathan 					    ("packet in mp1 only\n"));
2273a6766df4SSukumar Swaminathan 					mp = mp1;
2274a6766df4SSukumar Swaminathan 				}
2275a6766df4SSukumar Swaminathan 			} else if (mp2) {
2276a6766df4SSukumar Swaminathan 				QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2277a6766df4SSukumar Swaminathan 				mp = mp2;
2278bafec742SSukumar Swaminathan 			}
2279bafec742SSukumar Swaminathan 		}
2280bafec742SSukumar Swaminathan 	}
2281bafec742SSukumar Swaminathan 	return (mp);
2282bafec742SSukumar Swaminathan 
2283accf27a5SSukumar Swaminathan fatal_error:
2284accf27a5SSukumar Swaminathan 	/* fatal Error! */
2285accf27a5SSukumar Swaminathan 	if (qlge->fm_enable) {
2286accf27a5SSukumar Swaminathan 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2287accf27a5SSukumar Swaminathan 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2288accf27a5SSukumar Swaminathan 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2289accf27a5SSukumar Swaminathan 	}
2290a6766df4SSukumar Swaminathan 	if (tp) {
2291a6766df4SSukumar Swaminathan 		freemsg(tp);
2292a6766df4SSukumar Swaminathan 	}
2293a6766df4SSukumar Swaminathan 
2294accf27a5SSukumar Swaminathan 	/* *mp->b_wptr = 0; */
2295accf27a5SSukumar Swaminathan 	ql_wake_asic_reset_soft_intr(qlge);
2296accf27a5SSukumar Swaminathan 	return (NULL);
2297bafec742SSukumar Swaminathan 
2298bafec742SSukumar Swaminathan }
2299bafec742SSukumar Swaminathan 
2300bafec742SSukumar Swaminathan /*
2301bafec742SSukumar Swaminathan  * Bump completion queue consumer index.
2302bafec742SSukumar Swaminathan  */
2303bafec742SSukumar Swaminathan static void
ql_update_cq(struct rx_ring * rx_ring)2304bafec742SSukumar Swaminathan ql_update_cq(struct rx_ring *rx_ring)
2305bafec742SSukumar Swaminathan {
2306bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx++;
2307bafec742SSukumar Swaminathan 	rx_ring->curr_entry++;
2308bafec742SSukumar Swaminathan 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2309bafec742SSukumar Swaminathan 		rx_ring->cnsmr_idx = 0;
2310bafec742SSukumar Swaminathan 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2311bafec742SSukumar Swaminathan 	}
2312bafec742SSukumar Swaminathan }
2313bafec742SSukumar Swaminathan 
2314bafec742SSukumar Swaminathan /*
2315bafec742SSukumar Swaminathan  * Update completion queue consumer index.
2316bafec742SSukumar Swaminathan  */
2317bafec742SSukumar Swaminathan static void
ql_write_cq_idx(struct rx_ring * rx_ring)2318bafec742SSukumar Swaminathan ql_write_cq_idx(struct rx_ring *rx_ring)
2319bafec742SSukumar Swaminathan {
2320bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2321bafec742SSukumar Swaminathan 
2322bafec742SSukumar Swaminathan 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2323bafec742SSukumar Swaminathan 	    rx_ring->cnsmr_idx);
2324bafec742SSukumar Swaminathan }
2325bafec742SSukumar Swaminathan 
2326bafec742SSukumar Swaminathan /*
2327bafec742SSukumar Swaminathan  * Processes a SYS-Chip Event Notification Completion Event.
2328bafec742SSukumar Swaminathan  * The incoming notification event that describes a link up/down
2329bafec742SSukumar Swaminathan  * or some sorts of error happens.
2330bafec742SSukumar Swaminathan  */
2331bafec742SSukumar Swaminathan static void
ql_process_chip_ae_intr(qlge_t * qlge,struct ib_sys_event_iocb_rsp * ib_sys_event_rsp_ptr)2332bafec742SSukumar Swaminathan ql_process_chip_ae_intr(qlge_t *qlge,
2333bafec742SSukumar Swaminathan     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2334bafec742SSukumar Swaminathan {
2335bafec742SSukumar Swaminathan 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2336bafec742SSukumar Swaminathan 	uint32_t soft_req = 0;
2337bafec742SSukumar Swaminathan 
2338bafec742SSukumar Swaminathan 	switch (eventType) {
2339bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2340bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2341bafec742SSukumar Swaminathan 			break;
2342bafec742SSukumar Swaminathan 
2343bafec742SSukumar Swaminathan 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2344bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2345bafec742SSukumar Swaminathan 			break;
2346bafec742SSukumar Swaminathan 
2347bafec742SSukumar Swaminathan 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2348bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "A multiple CAM hits look up error "
2349bafec742SSukumar Swaminathan 			    "occurred");
2350bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2351bafec742SSukumar Swaminathan 			break;
2352bafec742SSukumar Swaminathan 
2353bafec742SSukumar Swaminathan 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2354bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Soft ECC error detected");
2355bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2356bafec742SSukumar Swaminathan 			break;
2357bafec742SSukumar Swaminathan 
2358bafec742SSukumar Swaminathan 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2359bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2360bafec742SSukumar Swaminathan 			    " error occured");
2361bafec742SSukumar Swaminathan 			soft_req |= NEED_MPI_RESET;
2362bafec742SSukumar Swaminathan 			break;
2363bafec742SSukumar Swaminathan 
2364bafec742SSukumar Swaminathan 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2365bafec742SSukumar Swaminathan 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2366bafec742SSukumar Swaminathan 			break;
2367bafec742SSukumar Swaminathan 
2368bafec742SSukumar Swaminathan 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2369bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "PCI Error reading small/large "
2370bafec742SSukumar Swaminathan 			    "buffers occured");
2371bafec742SSukumar Swaminathan 			soft_req |= NEED_HW_RESET;
2372bafec742SSukumar Swaminathan 			break;
2373bafec742SSukumar Swaminathan 
2374bafec742SSukumar Swaminathan 		default:
2375bafec742SSukumar Swaminathan 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2376bafec742SSukumar Swaminathan 			    "type 0x%x occured",
2377bafec742SSukumar Swaminathan 			    __func__, qlge->instance, eventType));
2378bafec742SSukumar Swaminathan 			break;
2379bafec742SSukumar Swaminathan 	}
2380bafec742SSukumar Swaminathan 
2381bafec742SSukumar Swaminathan 	if ((soft_req & NEED_MPI_RESET) != 0) {
2382bafec742SSukumar Swaminathan 		ql_wake_mpi_reset_soft_intr(qlge);
2383accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
2384accf27a5SSukumar Swaminathan 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2385accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2386accf27a5SSukumar Swaminathan 		}
2387bafec742SSukumar Swaminathan 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2388bafec742SSukumar Swaminathan 		ql_wake_asic_reset_soft_intr(qlge);
2389accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
2390accf27a5SSukumar Swaminathan 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2391accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2392accf27a5SSukumar Swaminathan 		}
2393bafec742SSukumar Swaminathan 	}
2394bafec742SSukumar Swaminathan }
2395bafec742SSukumar Swaminathan 
2396bafec742SSukumar Swaminathan /*
2397bafec742SSukumar Swaminathan  * set received packet checksum flag
2398bafec742SSukumar Swaminathan  */
2399bafec742SSukumar Swaminathan void
ql_set_rx_cksum(mblk_t * mp,struct ib_mac_iocb_rsp * net_rsp)2400bafec742SSukumar Swaminathan ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2401bafec742SSukumar Swaminathan {
2402bafec742SSukumar Swaminathan 	uint32_t flags;
2403bafec742SSukumar Swaminathan 
2404bafec742SSukumar Swaminathan 	/* Not TCP or UDP packet? nothing more to do */
2405bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2406bafec742SSukumar Swaminathan 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
240715c07adcSJohn Levon 		return;
2408bafec742SSukumar Swaminathan 
2409bafec742SSukumar Swaminathan 	/* No CKO support for IPv6 */
2410bafec742SSukumar Swaminathan 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2411bafec742SSukumar Swaminathan 		return;
2412bafec742SSukumar Swaminathan 
2413bafec742SSukumar Swaminathan 	/*
2414bafec742SSukumar Swaminathan 	 * If checksum error, don't set flags; stack will calculate
2415bafec742SSukumar Swaminathan 	 * checksum, detect the error and update statistics
2416bafec742SSukumar Swaminathan 	 */
2417bafec742SSukumar Swaminathan 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2418bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2419bafec742SSukumar Swaminathan 		return;
2420bafec742SSukumar Swaminathan 
2421bafec742SSukumar Swaminathan 	/* TCP or UDP packet and checksum valid */
2422bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2423bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
24240dc2366fSVenugopal Iyer 		flags = HCK_FULLCKSUM_OK;
24250dc2366fSVenugopal Iyer 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2426bafec742SSukumar Swaminathan 	}
2427bafec742SSukumar Swaminathan 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2428bafec742SSukumar Swaminathan 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
24290dc2366fSVenugopal Iyer 		flags = HCK_FULLCKSUM_OK;
24300dc2366fSVenugopal Iyer 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2431bafec742SSukumar Swaminathan 	}
2432bafec742SSukumar Swaminathan }
2433bafec742SSukumar Swaminathan 
2434bafec742SSukumar Swaminathan /*
2435bafec742SSukumar Swaminathan  * This function goes through h/w descriptor in one specified rx ring,
2436bafec742SSukumar Swaminathan  * receives the data if the descriptor status shows the data is ready.
2437bafec742SSukumar Swaminathan  * It returns a chain of mblks containing the received data, to be
2438bafec742SSukumar Swaminathan  * passed up to mac_rx_ring().
2439bafec742SSukumar Swaminathan  */
2440bafec742SSukumar Swaminathan mblk_t *
ql_ring_rx(struct rx_ring * rx_ring,int poll_bytes)2441bafec742SSukumar Swaminathan ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2442bafec742SSukumar Swaminathan {
2443bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
2444accf27a5SSukumar Swaminathan 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2445bafec742SSukumar Swaminathan 	struct ib_mac_iocb_rsp *net_rsp;
2446bafec742SSukumar Swaminathan 	mblk_t *mp;
2447bafec742SSukumar Swaminathan 	mblk_t *mblk_head;
2448bafec742SSukumar Swaminathan 	mblk_t **mblk_tail;
2449bafec742SSukumar Swaminathan 	uint32_t received_bytes = 0;
2450bafec742SSukumar Swaminathan 	uint32_t length;
2451accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2452accf27a5SSukumar Swaminathan 	uint32_t pkt_ct = 0;
2453accf27a5SSukumar Swaminathan #endif
2454bafec742SSukumar Swaminathan 
2455bafec742SSukumar Swaminathan #ifdef QLGE_TRACK_BUFFER_USAGE
2456bafec742SSukumar Swaminathan 	uint32_t consumer_idx;
2457bafec742SSukumar Swaminathan 	uint32_t producer_idx;
2458bafec742SSukumar Swaminathan 	uint32_t num_free_entries;
2459bafec742SSukumar Swaminathan 	uint32_t temp;
2460bafec742SSukumar Swaminathan 
2461bafec742SSukumar Swaminathan 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2462bafec742SSukumar Swaminathan 	consumer_idx = temp & 0x0000ffff;
2463bafec742SSukumar Swaminathan 	producer_idx = (temp >> 16);
2464bafec742SSukumar Swaminathan 
2465bafec742SSukumar Swaminathan 	if (consumer_idx > producer_idx)
2466bafec742SSukumar Swaminathan 		num_free_entries = (consumer_idx - producer_idx);
2467bafec742SSukumar Swaminathan 	else
2468bafec742SSukumar Swaminathan 		num_free_entries = NUM_RX_RING_ENTRIES - (
2469bafec742SSukumar Swaminathan 		    producer_idx - consumer_idx);
2470bafec742SSukumar Swaminathan 
2471bafec742SSukumar Swaminathan 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2472bafec742SSukumar Swaminathan 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2473bafec742SSukumar Swaminathan 
2474bafec742SSukumar Swaminathan #endif
2475bafec742SSukumar Swaminathan 	mblk_head = NULL;
2476bafec742SSukumar Swaminathan 	mblk_tail = &mblk_head;
2477bafec742SSukumar Swaminathan 
2478accf27a5SSukumar Swaminathan 	while ((prod != rx_ring->cnsmr_idx)) {
2479bafec742SSukumar Swaminathan 		QL_PRINT(DBG_RX,
2480bafec742SSukumar Swaminathan 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2481bafec742SSukumar Swaminathan 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2482bafec742SSukumar Swaminathan 
2483bafec742SSukumar Swaminathan 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2484bafec742SSukumar Swaminathan 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2485bafec742SSukumar Swaminathan 		    (off_t)((uintptr_t)net_rsp -
2486bafec742SSukumar Swaminathan 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2487bafec742SSukumar Swaminathan 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2488bafec742SSukumar Swaminathan 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2489bafec742SSukumar Swaminathan 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2490bafec742SSukumar Swaminathan 
2491bafec742SSukumar Swaminathan 		switch (net_rsp->opcode) {
2492bafec742SSukumar Swaminathan 
2493bafec742SSukumar Swaminathan 		case OPCODE_IB_MAC_IOCB:
2494bafec742SSukumar Swaminathan 			/* Adding length of pkt header and payload */
2495bafec742SSukumar Swaminathan 			length = le32_to_cpu(net_rsp->data_len) +
2496bafec742SSukumar Swaminathan 			    le32_to_cpu(net_rsp->hdr_len);
2497bafec742SSukumar Swaminathan 			if ((poll_bytes != QLGE_POLL_ALL) &&
2498bafec742SSukumar Swaminathan 			    ((received_bytes + length) > poll_bytes)) {
2499bafec742SSukumar Swaminathan 				continue;
2500bafec742SSukumar Swaminathan 			}
2501bafec742SSukumar Swaminathan 			received_bytes += length;
2502bafec742SSukumar Swaminathan 
2503accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2504accf27a5SSukumar Swaminathan 			pkt_ct++;
2505accf27a5SSukumar Swaminathan #endif
2506bafec742SSukumar Swaminathan 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2507bafec742SSukumar Swaminathan 			if (mp != NULL) {
2508bafec742SSukumar Swaminathan 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2509bafec742SSukumar Swaminathan 					/*
2510bafec742SSukumar Swaminathan 					 * Increment number of packets we have
2511bafec742SSukumar Swaminathan 					 * indicated to the stack, should be
2512bafec742SSukumar Swaminathan 					 * decremented when we get it back
2513bafec742SSukumar Swaminathan 					 * or when freemsg is called
2514bafec742SSukumar Swaminathan 					 */
2515bafec742SSukumar Swaminathan 					ASSERT(rx_ring->rx_indicate
2516bafec742SSukumar Swaminathan 					    <= rx_ring->cq_len);
2517bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
2518bafec742SSukumar Swaminathan 					cmn_err(CE_WARN, "%s do not send to OS,"
2519bafec742SSukumar Swaminathan 					    " mac_flags %d, indicate %d",
2520bafec742SSukumar Swaminathan 					    __func__, rx_ring->mac_flags,
2521bafec742SSukumar Swaminathan 					    rx_ring->rx_indicate);
2522bafec742SSukumar Swaminathan #endif
2523bafec742SSukumar Swaminathan 					QL_PRINT(DBG_RX,
2524bafec742SSukumar Swaminathan 					    ("cq_id = %d, packet "
2525bafec742SSukumar Swaminathan 					    "dropped, mac not "
2526bafec742SSukumar Swaminathan 					    "enabled.\n",
2527bafec742SSukumar Swaminathan 					    rx_ring->cq_id));
2528bafec742SSukumar Swaminathan 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2529bafec742SSukumar Swaminathan 
2530bafec742SSukumar Swaminathan 					/* rx_lock is expected to be held */
2531bafec742SSukumar Swaminathan 					mutex_exit(&rx_ring->rx_lock);
2532bafec742SSukumar Swaminathan 					freemsg(mp);
2533bafec742SSukumar Swaminathan 					mutex_enter(&rx_ring->rx_lock);
2534bafec742SSukumar Swaminathan 					mp = NULL;
2535bafec742SSukumar Swaminathan 				}
2536bafec742SSukumar Swaminathan 
2537bafec742SSukumar Swaminathan 				if (mp != NULL) {
2538bafec742SSukumar Swaminathan 					/*
2539bafec742SSukumar Swaminathan 					 * IP full packet has been
2540bafec742SSukumar Swaminathan 					 * successfully verified by
2541bafec742SSukumar Swaminathan 					 * H/W and is correct
2542bafec742SSukumar Swaminathan 					 */
2543bafec742SSukumar Swaminathan 					ql_set_rx_cksum(mp, net_rsp);
2544bafec742SSukumar Swaminathan 
2545bafec742SSukumar Swaminathan 					rx_ring->rx_packets++;
2546bafec742SSukumar Swaminathan 					rx_ring->rx_bytes += length;
2547bafec742SSukumar Swaminathan 					*mblk_tail = mp;
2548bafec742SSukumar Swaminathan 					mblk_tail = &mp->b_next;
2549bafec742SSukumar Swaminathan 				}
2550bafec742SSukumar Swaminathan 			} else {
2551bafec742SSukumar Swaminathan 				QL_PRINT(DBG_RX,
2552bafec742SSukumar Swaminathan 				    ("cq_id = %d, packet dropped\n",
2553bafec742SSukumar Swaminathan 				    rx_ring->cq_id));
2554bafec742SSukumar Swaminathan 				rx_ring->rx_packets_dropped_no_buffer++;
2555bafec742SSukumar Swaminathan 			}
2556bafec742SSukumar Swaminathan 			break;
2557bafec742SSukumar Swaminathan 
2558bafec742SSukumar Swaminathan 		case OPCODE_IB_SYS_EVENT_IOCB:
2559bafec742SSukumar Swaminathan 			ql_process_chip_ae_intr(qlge,
2560bafec742SSukumar Swaminathan 			    (struct ib_sys_event_iocb_rsp *)
2561bafec742SSukumar Swaminathan 			    net_rsp);
2562bafec742SSukumar Swaminathan 			break;
2563bafec742SSukumar Swaminathan 
2564bafec742SSukumar Swaminathan 		default:
2565bafec742SSukumar Swaminathan 			cmn_err(CE_WARN,
2566bafec742SSukumar Swaminathan 			    "%s Ring(%d)Hit default case, not handled!"
2567bafec742SSukumar Swaminathan 			    " dropping the packet, "
2568bafec742SSukumar Swaminathan 			    "opcode = %x.", __func__, rx_ring->cq_id,
2569bafec742SSukumar Swaminathan 			    net_rsp->opcode);
2570bafec742SSukumar Swaminathan 			break;
2571bafec742SSukumar Swaminathan 		}
2572bafec742SSukumar Swaminathan 		/* increment cnsmr_idx and curr_entry */
2573bafec742SSukumar Swaminathan 		ql_update_cq(rx_ring);
2574accf27a5SSukumar Swaminathan 		prod = ql_read_sh_reg(qlge, rx_ring);
2575accf27a5SSukumar Swaminathan 
2576accf27a5SSukumar Swaminathan 	}
2577accf27a5SSukumar Swaminathan 
2578accf27a5SSukumar Swaminathan #ifdef QLGE_PERFORMANCE
2579accf27a5SSukumar Swaminathan 	if (pkt_ct >= 7)
2580accf27a5SSukumar Swaminathan 		rx_ring->hist[7]++;
2581accf27a5SSukumar Swaminathan 	else if (pkt_ct == 6)
2582accf27a5SSukumar Swaminathan 		rx_ring->hist[6]++;
2583accf27a5SSukumar Swaminathan 	else if (pkt_ct == 5)
2584accf27a5SSukumar Swaminathan 		rx_ring->hist[5]++;
2585accf27a5SSukumar Swaminathan 	else if (pkt_ct == 4)
2586accf27a5SSukumar Swaminathan 		rx_ring->hist[4]++;
2587accf27a5SSukumar Swaminathan 	else if (pkt_ct == 3)
2588accf27a5SSukumar Swaminathan 		rx_ring->hist[3]++;
2589accf27a5SSukumar Swaminathan 	else if (pkt_ct == 2)
2590accf27a5SSukumar Swaminathan 		rx_ring->hist[2]++;
2591accf27a5SSukumar Swaminathan 	else if (pkt_ct == 1)
2592accf27a5SSukumar Swaminathan 		rx_ring->hist[1]++;
2593accf27a5SSukumar Swaminathan 	else if (pkt_ct == 0)
2594accf27a5SSukumar Swaminathan 		rx_ring->hist[0]++;
2595accf27a5SSukumar Swaminathan #endif
2596bafec742SSukumar Swaminathan 
2597bafec742SSukumar Swaminathan 	/* update cnsmr_idx */
2598bafec742SSukumar Swaminathan 	ql_write_cq_idx(rx_ring);
2599bafec742SSukumar Swaminathan 	/* do not enable interrupt for polling mode */
2600bafec742SSukumar Swaminathan 	if (poll_bytes == QLGE_POLL_ALL)
2601bafec742SSukumar Swaminathan 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2602bafec742SSukumar Swaminathan 	return (mblk_head);
2603bafec742SSukumar Swaminathan }
2604bafec742SSukumar Swaminathan 
/*
 * Process an outbound (transmit) completion from an rx ring.
 *
 * Looks up the tx descriptor identified by the completion's queue
 * index and transaction id, releases any DMA binding and the sent
 * mblk, updates the ring's byte/packet/error statistics, and
 * returns the descriptor to the free pool by bumping tx_free_count.
 */
static void
ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int j;

	/* txq_idx selects the tx ring; tid indexes its descriptor array. */
	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = tx_ring->wq_desc;
	tx_ring_desc += mac_rsp->tid;

	if (tx_ring_desc->tx_type == USE_DMA) {
		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
		    __func__, qlge->instance));

		/*
		 * Release the DMA resource that is used for
		 * DMA binding.
		 */
		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
			(void) ddi_dma_unbind_handle(
			    tx_ring_desc->tx_dma_handle[j]);
		}

		tx_ring_desc->tx_dma_handle_used = 0;
		/*
		 * Free the mblk after sending completed
		 */
		if (tx_ring_desc->mp != NULL) {
			freemsg(tx_ring_desc->mp);
			tx_ring_desc->mp = NULL;
		}
	}

	tx_ring->obytes += tx_ring_desc->tx_bytes;
	tx_ring->opackets++;

	/*
	 * NOTE(review): the summary test below checks OB_MAC_IOCB_RSP_B
	 * against flags1, but the detailed test further down reads the
	 * same bit from flags3.  One of the two is likely the wrong
	 * field -- confirm against the IOCB layout in the qlge header /
	 * hardware spec.
	 */
	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
		tx_ring->errxmt++;
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Total descriptor length did not match "
			    "transfer length.\n"));
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Frame too short to be legal, not sent.\n"));
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("Frame too long, but sent anyway.\n"));
		}
		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("PCI backplane error. Frame not sent.\n"));
		}
	}
	/* Descriptor is reusable; make it visible to the send path. */
	atomic_inc_32(&tx_ring->tx_free_count);
}
2670bafec742SSukumar Swaminathan 
/*
 * Clean up transmit-completion IOCBs.
 *
 * Drain the outbound completion queue attached to rx_ring: for each
 * entry between the hardware producer index and our consumer index,
 * sync it from DMA memory and hand OB_MAC completions to
 * ql_process_mac_tx_intr().  Afterwards, if the tx ring these
 * completions belong to was stopped for lack of descriptors and has
 * since drained past the resume threshold, restart it.
 *
 * Returns the number of completion entries processed.
 */
int
ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	qlge_t *qlge = rx_ring->qlge;
	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);	/* hw producer idx */
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;
	struct tx_ring *tx_ring;
	boolean_t resume_tx = B_FALSE;

	mutex_enter(&rx_ring->rx_lock);
#ifdef QLGE_TRACK_BUFFER_USAGE
	/* Track the low-water mark of free completion-queue entries. */
	{
	uint32_t consumer_idx;
	uint32_t producer_idx;
	uint32_t num_free_entries;
	uint32_t temp;

	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
	consumer_idx = temp & 0x0000ffff;
	producer_idx = (temp >> 16);

	if (consumer_idx > producer_idx)
		num_free_entries = (consumer_idx - producer_idx);
	else
		num_free_entries = NUM_RX_RING_ENTRIES -
		    (producer_idx - consumer_idx);

	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;

	}
#endif
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QL_PRINT(DBG_RX,
		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Make the device-written completion entry CPU-visible. */
		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
		    (off_t)((uintptr_t)net_rsp -
		    (uintptr_t)rx_ring->cq_dma.vaddr),
		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);

		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
		    "response packet data\n",
		    rx_ring->curr_entry, 8,
		    (size_t)sizeof (*net_rsp));

		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_OFFLOAD_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qlge, net_rsp);
			break;

		default:
			cmn_err(CE_WARN,
			    "%s Hit default case, not handled! "
			    "dropping the packet,"
			    " opcode = %x.",
			    __func__, net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		/* Re-read the producer index; more may have arrived. */
		prod = ql_read_sh_reg(qlge, rx_ring);
	}
	/* Publish our new consumer index to the chip. */
	ql_write_cq_idx(rx_ring);

	mutex_exit(&rx_ring->rx_lock);

	/*
	 * NOTE(review): curr_entry now points one past the last entry
	 * processed; if the loop above ran zero times, txq_idx is read
	 * from an entry the hardware has not produced yet -- confirm
	 * callers only invoke this when work is pending.
	 */
	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];

	mutex_enter(&tx_ring->tx_lock);

	if (tx_ring->queue_stopped &&
	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
		/*
		 * The queue got stopped because the tx_ring was full.
		 * Wake it up, because it's now at least 25% empty.
		 */
		tx_ring->queue_stopped = 0;
		resume_tx = B_TRUE;
	}

	mutex_exit(&tx_ring->tx_lock);
	/* Don't hold the lock during OS callback */
	if (resume_tx)
		RESUME_TX(tx_ring);
	return (count);
}
2769bafec742SSukumar Swaminathan 
/*
 * Soft interrupt handler: recover from a fatal adapter error by
 * cycling the chip down and back up (stop, reprogram the primary
 * MAC address, restart).  On failure the device is left closed and,
 * when FMA is enabled, marked lost.
 *
 * arg1 carries the qlge_t soft state; arg2 is unused.
 * Always returns DDI_INTR_CLAIMED.
 */
/* ARGSUSED */
static uint_t
ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
{
	qlge_t *qlge = (qlge_t *)((void *)arg1);
	int status;

	mutex_enter(&qlge->gen_mutex);
	(void) ql_do_stop(qlge);
	/*
	 * Write default ethernet address to chip register Mac
	 * Address slot 0 and Enable Primary Mac Function.
	 */
	mutex_enter(&qlge->hw_mutex);
	(void) ql_unicst_set(qlge,
	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
	mutex_exit(&qlge->hw_mutex);
	qlge->mac_flags = QL_MAC_INIT;
	status = ql_do_start(qlge);
	if (status != DDI_SUCCESS)
		goto error;
	qlge->mac_flags = QL_MAC_STARTED;
	mutex_exit(&qlge->gen_mutex);
	/* Tell FMA that device service has been restored. */
	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);

	return (DDI_INTR_CLAIMED);

error:
	mutex_exit(&qlge->gen_mutex);
	cmn_err(CE_WARN,
	    "qlge up/down cycle failed, closing device");
	if (qlge->fm_enable) {
		/* Report the unrecoverable state to the FMA framework. */
		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
	}
	return (DDI_INTR_CLAIMED);
}
2811bafec742SSukumar Swaminathan 
2812bafec742SSukumar Swaminathan /*
2813bafec742SSukumar Swaminathan  * Reset MPI
2814bafec742SSukumar Swaminathan  */
2815bafec742SSukumar Swaminathan /* ARGSUSED */
2816bafec742SSukumar Swaminathan static uint_t
ql_mpi_reset_work(caddr_t arg1,caddr_t arg2)2817bafec742SSukumar Swaminathan ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2818bafec742SSukumar Swaminathan {
2819bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2820bafec742SSukumar Swaminathan 
28210662fbf4SSukumar Swaminathan 	(void) ql_reset_mpi_risc(qlge);
2822bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2823bafec742SSukumar Swaminathan }
2824bafec742SSukumar Swaminathan 
2825bafec742SSukumar Swaminathan /*
2826bafec742SSukumar Swaminathan  * Process MPI mailbox messages
2827bafec742SSukumar Swaminathan  */
2828bafec742SSukumar Swaminathan /* ARGSUSED */
2829bafec742SSukumar Swaminathan static uint_t
ql_mpi_event_work(caddr_t arg1,caddr_t arg2)2830bafec742SSukumar Swaminathan ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2831bafec742SSukumar Swaminathan {
2832bafec742SSukumar Swaminathan 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2833bafec742SSukumar Swaminathan 
2834bafec742SSukumar Swaminathan 	ql_do_mpi_intr(qlge);
2835bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
2836bafec742SSukumar Swaminathan }
2837bafec742SSukumar Swaminathan 
/* Trigger the soft interrupt that performs the adapter (ASIC) reset. */
void
ql_wake_asic_reset_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
}
2844bafec742SSukumar Swaminathan 
/* Trigger the soft interrupt that resets the MPI processor. */
static void
ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
}
2850bafec742SSukumar Swaminathan 
/* Trigger the soft interrupt that processes MPI mailbox events. */
static void
ql_wake_mpi_event_soft_intr(qlge_t *qlge)
{
	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
}
2856bafec742SSukumar Swaminathan 
/*
 * Main interrupt service routine.
 *
 * Handles a fatal error, MPI activity, and the default rx_ring in an
 * MSI-X multiple-interrupt-vector environment.  In MSI/legacy mode it
 * also services the rest of the rx_rings.
 *
 * Returns DDI_INTR_CLAIMED when any work was found (or the interrupt
 * was otherwise ours), else DDI_INTR_UNCLAIMED.
 */
/* ARGSUSED */
static uint_t
ql_isr(caddr_t arg1, caddr_t arg2)
{
	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
	struct rx_ring *ob_ring;
	qlge_t *qlge = rx_ring->qlge;
	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
	uint32_t var, prod;
	int i;
	int work_done = 0;

	mblk_t *mp;

	_NOTE(ARGUNUSED(arg2));

	++qlge->rx_interrupts[rx_ring->cq_id];

	/*
	 * If the interrupt is already held off (irq_cnt nonzero), just
	 * read the status registers -- values are intentionally
	 * discarded; presumably the reads acknowledge latched state,
	 * TODO confirm -- and claim the interrupt.
	 */
	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
		var = ql_read_reg(qlge, REG_ERROR_STATUS);
		var = ql_read_reg(qlge, REG_STATUS);
		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
		return (DDI_INTR_CLAIMED);
	}

	ql_disable_completion_interrupt(qlge, intr_ctx->intr);

	/*
	 * process send completes on first stride tx ring if available
	 */
	if (qlge->isr_stride) {
		ob_ring = &qlge->rx_ring[qlge->isr_stride];
		if (ql_read_sh_reg(qlge, ob_ring) !=
		    ob_ring->cnsmr_idx) {
			(void) ql_clean_outbound_rx_ring(ob_ring);
		}
	}
	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qlge->rx_ring[0];
	prod = ql_read_sh_reg(qlge, rx_ring);
	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
	    prod, rx_ring->cnsmr_idx));
	/* check if interrupt is due to incoming packet */
	if (prod != rx_ring->cnsmr_idx) {
		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
		/*
		 * NOTE(review): the interrupt was already disabled above;
		 * this second disable looks redundant -- confirm that
		 * ql_disable_completion_interrupt() is safe to nest.
		 */
		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
		mutex_enter(&rx_ring->rx_lock);
		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
		mutex_exit(&rx_ring->rx_lock);

		if (mp != NULL)
			RX_UPSTREAM(rx_ring, mp);
		work_done++;
	} else {
		/*
		 * If interrupt is not due to incoming packet, read status
		 * register to see if error happens or mailbox interrupt.
		 */
		var = ql_read_reg(qlge, REG_STATUS);
		if ((var & STATUS_FE) != 0) {
			/* Fatal error: report to FMA, schedule a reset. */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
			if (qlge->fm_enable) {
				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
				ddi_fm_service_impact(qlge->dip,
				    DDI_SERVICE_LOST);
			}
			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
			var = ql_read_reg(qlge, REG_ERROR_STATUS);
			cmn_err(CE_WARN,
			    "Resetting chip. Error Status Register = 0x%x",
			    var);
			ql_wake_asic_reset_soft_intr(qlge);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check MPI processor activity.
		 */
		if ((var & STATUS_PI) != 0) {
			/*
			 * We've got an async event or mailbox completion.
			 * Handle it and clear the source of the interrupt.
			 */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);

			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
			ql_wake_mpi_event_soft_intr(qlge);
			work_done++;
		}
	}


	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qlge->rx_ring_count; i++) {
			rx_ring = &qlge->rx_ring[i];

			if (ql_read_sh_reg(qlge, rx_ring) !=
			    rx_ring->cnsmr_idx) {
				QL_PRINT(DBG_INTR,
				    ("Waking handler for rx_ring[%d].\n", i));

				ql_disable_completion_interrupt(qlge,
				    rx_ring->irq);
				if (rx_ring->type == TX_Q) {
					/* Outbound (tx completion) ring. */
					(void) ql_clean_outbound_rx_ring(
					    rx_ring);
					ql_enable_completion_interrupt(
					    rx_ring->qlge, rx_ring->irq);
				} else {
					/* Inbound ring: drain and push up. */
					mutex_enter(&rx_ring->rx_lock);
					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
					mutex_exit(&rx_ring->rx_lock);
					if (mp != NULL)
						RX_UPSTREAM(rx_ring, mp);
#ifdef QLGE_LOAD_UNLOAD
					if (rx_ring->mac_flags ==
					    QL_MAC_STOPPED)
						cmn_err(CE_NOTE,
						    "%s rx_indicate(%d) %d\n",
						    __func__, i,
						    rx_ring->rx_indicate);
#endif
				}
				work_done++;
			}
		}
	}

	ql_enable_completion_interrupt(qlge, intr_ctx->intr);

	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
3003bafec742SSukumar Swaminathan 
3004bafec742SSukumar Swaminathan /*
3005bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3006bafec742SSukumar Swaminathan  */
3007bafec742SSukumar Swaminathan /* ARGSUSED */
3008bafec742SSukumar Swaminathan static uint_t
ql_msix_tx_isr(caddr_t arg1,caddr_t arg2)3009bafec742SSukumar Swaminathan ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3010bafec742SSukumar Swaminathan {
3011bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3012bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3013bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3014bafec742SSukumar Swaminathan 
3015bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
30160662fbf4SSukumar Swaminathan 	(void) ql_clean_outbound_rx_ring(rx_ring);
3017bafec742SSukumar Swaminathan 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3018bafec742SSukumar Swaminathan 
3019bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3020bafec742SSukumar Swaminathan }
3021bafec742SSukumar Swaminathan 
3022accf27a5SSukumar Swaminathan /*
3023accf27a5SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler
3024accf27a5SSukumar Swaminathan  */
3025accf27a5SSukumar Swaminathan /* ARGSUSED */
3026accf27a5SSukumar Swaminathan static uint_t
ql_msix_isr(caddr_t arg1,caddr_t arg2)3027accf27a5SSukumar Swaminathan ql_msix_isr(caddr_t arg1, caddr_t arg2)
3028accf27a5SSukumar Swaminathan {
3029accf27a5SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3030accf27a5SSukumar Swaminathan 	struct rx_ring *ob_ring;
3031accf27a5SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3032accf27a5SSukumar Swaminathan 	mblk_t *mp;
3033accf27a5SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3034accf27a5SSukumar Swaminathan 
3035accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3036accf27a5SSukumar Swaminathan 
3037accf27a5SSukumar Swaminathan 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
3038accf27a5SSukumar Swaminathan 
3039accf27a5SSukumar Swaminathan 	/*
3040accf27a5SSukumar Swaminathan 	 * process send completes on stride tx ring if available
3041accf27a5SSukumar Swaminathan 	 */
3042accf27a5SSukumar Swaminathan 	if (qlge->isr_stride) {
3043accf27a5SSukumar Swaminathan 		ob_ring = rx_ring + qlge->isr_stride;
3044accf27a5SSukumar Swaminathan 		if (ql_read_sh_reg(qlge, ob_ring) !=
3045accf27a5SSukumar Swaminathan 		    ob_ring->cnsmr_idx) {
3046accf27a5SSukumar Swaminathan 			++qlge->rx_interrupts[ob_ring->cq_id];
3047accf27a5SSukumar Swaminathan 			(void) ql_clean_outbound_rx_ring(ob_ring);
3048accf27a5SSukumar Swaminathan 		}
3049accf27a5SSukumar Swaminathan 	}
3050accf27a5SSukumar Swaminathan 
3051accf27a5SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
3052accf27a5SSukumar Swaminathan 
3053accf27a5SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
3054accf27a5SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3055accf27a5SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
3056accf27a5SSukumar Swaminathan 
3057accf27a5SSukumar Swaminathan 	if (mp != NULL)
3058accf27a5SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
3059accf27a5SSukumar Swaminathan 
3060accf27a5SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3061accf27a5SSukumar Swaminathan }
3062accf27a5SSukumar Swaminathan 
/*
 * Poll up to n_bytes of chained incoming packets from one rx ring
 * (polling-mode entry point).  Returns the mblk chain, or NULL when
 * nothing is available.  For ring 0 only, an empty poll also checks
 * the chip status register for fatal errors and MPI activity.
 */
mblk_t *
ql_ring_rx_poll(void *arg, int n_bytes)
{
	struct rx_ring *rx_ring = (struct rx_ring *)arg;
	qlge_t *qlge = rx_ring->qlge;
	mblk_t *mp = NULL;
	uint32_t var;

	ASSERT(n_bytes >= 0);
	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
	    __func__, rx_ring->cq_id, n_bytes));

	++qlge->rx_polls[rx_ring->cq_id];

	if (n_bytes == 0)
		return (mp);
	mutex_enter(&rx_ring->rx_lock);
	mp = ql_ring_rx(rx_ring, n_bytes);
	mutex_exit(&rx_ring->rx_lock);

	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
		var = ql_read_reg(qlge, REG_STATUS);
		/*
		 * Check for fatal error.
		 */
		if ((var & STATUS_FE) != 0) {
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
			var = ql_read_reg(qlge, REG_ERROR_STATUS);
			cmn_err(CE_WARN, "Got fatal error %x.", var);
			/* Schedule an adapter reset via soft interrupt. */
			ql_wake_asic_reset_soft_intr(qlge);
			if (qlge->fm_enable) {
				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
				ddi_fm_service_impact(qlge->dip,
				    DDI_SERVICE_LOST);
			}
		}
		/*
		 * Check MPI processor activity.
		 */
		if ((var & STATUS_PI) != 0) {
			/*
			 * We've got an async event or mailbox completion.
			 * Handle it and clear the source of the interrupt.
			 */
			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
			ql_do_mpi_intr(qlge);
		}
	}

	return (mp);
}
3118bafec742SSukumar Swaminathan 
3119bafec742SSukumar Swaminathan /*
3120bafec742SSukumar Swaminathan  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3121bafec742SSukumar Swaminathan  */
3122bafec742SSukumar Swaminathan /* ARGSUSED */
3123bafec742SSukumar Swaminathan static uint_t
ql_msix_rx_isr(caddr_t arg1,caddr_t arg2)3124bafec742SSukumar Swaminathan ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3125bafec742SSukumar Swaminathan {
3126bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3127bafec742SSukumar Swaminathan 	qlge_t *qlge = rx_ring->qlge;
3128bafec742SSukumar Swaminathan 	mblk_t *mp;
3129bafec742SSukumar Swaminathan 	_NOTE(ARGUNUSED(arg2));
3130bafec742SSukumar Swaminathan 
3131bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3132bafec742SSukumar Swaminathan 
3133bafec742SSukumar Swaminathan 	++qlge->rx_interrupts[rx_ring->cq_id];
3134bafec742SSukumar Swaminathan 
3135bafec742SSukumar Swaminathan 	mutex_enter(&rx_ring->rx_lock);
3136bafec742SSukumar Swaminathan 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3137bafec742SSukumar Swaminathan 	mutex_exit(&rx_ring->rx_lock);
3138bafec742SSukumar Swaminathan 
3139bafec742SSukumar Swaminathan 	if (mp != NULL)
3140bafec742SSukumar Swaminathan 		RX_UPSTREAM(rx_ring, mp);
3141bafec742SSukumar Swaminathan 
3142bafec742SSukumar Swaminathan 	return (DDI_INTR_CLAIMED);
3143bafec742SSukumar Swaminathan }
3144bafec742SSukumar Swaminathan 
3145bafec742SSukumar Swaminathan 
/*
 * Allocate the DMA buffer used by the ioctl service, sized to hold
 * the larger of the WCS MPI code RAM and the MEMC MPI RAM.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
ql_alloc_ioctl_dma_buf(qlge_t *qlge)
{
	uint64_t phy_addr;
	uint64_t alloc_size;
	ddi_dma_cookie_t dma_cookie;

	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
	    &ql_buf_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &qlge->ioctl_buf_dma_attr.acc_handle,
	    (size_t)alloc_size,  /* mem size */
	    (size_t)0,  /* alignment */
	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
	    &dma_cookie) != 0) {
		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	phy_addr = dma_cookie.dmac_laddress;

	/*
	 * NOTE(review): if vaddr is NULL here even though ql_alloc_phys()
	 * reported success, the DMA handle and memory allocated above are
	 * leaked on this failure path -- consider releasing them via
	 * ql_free_phys() before returning.
	 */
	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
		return (DDI_FAILURE);
	}

	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;

	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
	    "phy_addr = 0x%lx\n",
	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));

	return (DDI_SUCCESS);
}
3188bafec742SSukumar Swaminathan 
3189bafec742SSukumar Swaminathan 
3190bafec742SSukumar Swaminathan /*
3191bafec742SSukumar Swaminathan  * Function to free physical memory.
3192bafec742SSukumar Swaminathan  */
3193bafec742SSukumar Swaminathan static void
ql_free_phys(ddi_dma_handle_t * dma_handle,ddi_acc_handle_t * acc_handle)3194bafec742SSukumar Swaminathan ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3195bafec742SSukumar Swaminathan {
3196cddcb3daSSukumar Swaminathan 	if (*dma_handle != NULL) {
3197bafec742SSukumar Swaminathan 		(void) ddi_dma_unbind_handle(*dma_handle);
3198cddcb3daSSukumar Swaminathan 		if (*acc_handle != NULL)
3199bafec742SSukumar Swaminathan 			ddi_dma_mem_free(acc_handle);
3200bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3201cddcb3daSSukumar Swaminathan 		*acc_handle = NULL;
3202cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3203bafec742SSukumar Swaminathan 	}
3204bafec742SSukumar Swaminathan }
3205bafec742SSukumar Swaminathan 
3206bafec742SSukumar Swaminathan /*
3207bafec742SSukumar Swaminathan  * Function to free ioctl dma buffer.
3208bafec742SSukumar Swaminathan  */
3209bafec742SSukumar Swaminathan static void
ql_free_ioctl_dma_buf(qlge_t * qlge)3210bafec742SSukumar Swaminathan ql_free_ioctl_dma_buf(qlge_t *qlge)
3211bafec742SSukumar Swaminathan {
3212bafec742SSukumar Swaminathan 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3213bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3214bafec742SSukumar Swaminathan 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3215bafec742SSukumar Swaminathan 
3216bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3217bafec742SSukumar Swaminathan 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3218bafec742SSukumar Swaminathan 	}
3219bafec742SSukumar Swaminathan }
3220bafec742SSukumar Swaminathan 
/*
 * Free shadow register space used for request and completion queues
 *
 * Both areas are page-sized DMA buffers set up by ql_alloc_shadow_space().
 * Each free is guarded by a dma_handle NULL check and followed by a bzero
 * of the whole descriptor, so this is safe to call on a partially (or
 * never) initialized qlge and safe to call twice.
 */
static void
ql_free_shadow_space(qlge_t *qlge)
{
	/* Shadow area the chip updates with completion-queue indices. */
	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
		    &qlge->host_copy_shadow_dma_attr.acc_handle);
		bzero(&qlge->host_copy_shadow_dma_attr,
		    sizeof (qlge->host_copy_shadow_dma_attr));
	}

	/* Shadow area holding the buffer queue base pointers. */
	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
	}
}
3241bafec742SSukumar Swaminathan 
3242bafec742SSukumar Swaminathan /*
3243bafec742SSukumar Swaminathan  * Allocate shadow register space for request and completion queues
3244bafec742SSukumar Swaminathan  */
3245bafec742SSukumar Swaminathan static int
ql_alloc_shadow_space(qlge_t * qlge)3246bafec742SSukumar Swaminathan ql_alloc_shadow_space(qlge_t *qlge)
3247bafec742SSukumar Swaminathan {
3248bafec742SSukumar Swaminathan 	ddi_dma_cookie_t dma_cookie;
3249bafec742SSukumar Swaminathan 
3250bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
3251bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3252bafec742SSukumar Swaminathan 	    &ql_dev_acc_attr,
3253bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3254bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3255bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3256bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
3257bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3258bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3259bafec742SSukumar Swaminathan 		bzero(&qlge->host_copy_shadow_dma_attr,
3260bafec742SSukumar Swaminathan 		    sizeof (qlge->host_copy_shadow_dma_attr));
3261bafec742SSukumar Swaminathan 
3262bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3263bafec742SSukumar Swaminathan 		    "response shadow registers", __func__, qlge->instance);
3264bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3265bafec742SSukumar Swaminathan 	}
3266bafec742SSukumar Swaminathan 
3267bafec742SSukumar Swaminathan 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3268bafec742SSukumar Swaminathan 
3269bafec742SSukumar Swaminathan 	if (ql_alloc_phys(qlge->dip,
3270bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3271bafec742SSukumar Swaminathan 	    &ql_desc_acc_attr,
3272bafec742SSukumar Swaminathan 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3273bafec742SSukumar Swaminathan 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3274bafec742SSukumar Swaminathan 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3275bafec742SSukumar Swaminathan 	    (size_t)4, /* 4 bytes alignment */
3276bafec742SSukumar Swaminathan 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3277bafec742SSukumar Swaminathan 	    &dma_cookie) != 0) {
3278bafec742SSukumar Swaminathan 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3279bafec742SSukumar Swaminathan 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3280bafec742SSukumar Swaminathan 
3281bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3282bafec742SSukumar Swaminathan 		    "for request shadow registers",
3283bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
3284bafec742SSukumar Swaminathan 		goto err_wqp_sh_area;
3285bafec742SSukumar Swaminathan 	}
3286bafec742SSukumar Swaminathan 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3287bafec742SSukumar Swaminathan 
3288bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
3289bafec742SSukumar Swaminathan 
3290bafec742SSukumar Swaminathan err_wqp_sh_area:
3291bafec742SSukumar Swaminathan 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3292bafec742SSukumar Swaminathan 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
3293bafec742SSukumar Swaminathan 	bzero(&qlge->host_copy_shadow_dma_attr,
3294bafec742SSukumar Swaminathan 	    sizeof (qlge->host_copy_shadow_dma_attr));
3295bafec742SSukumar Swaminathan 
3296bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
3297bafec742SSukumar Swaminathan }
3298bafec742SSukumar Swaminathan 
3299bafec742SSukumar Swaminathan /*
3300bafec742SSukumar Swaminathan  * Initialize a tx ring
3301bafec742SSukumar Swaminathan  */
3302bafec742SSukumar Swaminathan static void
ql_init_tx_ring(struct tx_ring * tx_ring)3303bafec742SSukumar Swaminathan ql_init_tx_ring(struct tx_ring *tx_ring)
3304bafec742SSukumar Swaminathan {
3305bafec742SSukumar Swaminathan 	int i;
3306bafec742SSukumar Swaminathan 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3307bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3308bafec742SSukumar Swaminathan 
3309bafec742SSukumar Swaminathan 	for (i = 0; i < tx_ring->wq_len; i++) {
3310bafec742SSukumar Swaminathan 		tx_ring_desc->index = i;
3311bafec742SSukumar Swaminathan 		tx_ring_desc->queue_entry = mac_iocb_ptr;
3312bafec742SSukumar Swaminathan 		mac_iocb_ptr++;
3313bafec742SSukumar Swaminathan 		tx_ring_desc++;
3314bafec742SSukumar Swaminathan 	}
3315bafec742SSukumar Swaminathan 	tx_ring->tx_free_count = tx_ring->wq_len;
3316bafec742SSukumar Swaminathan 	tx_ring->queue_stopped = 0;
3317bafec742SSukumar Swaminathan }
3318bafec742SSukumar Swaminathan 
/*
 * Free one tx ring resources
 *
 * Releases, in order: the work queue (IOCB array) DMA area, each
 * descriptor's tx DMA handles and its combined oal/copy buffer, the
 * descriptor array itself, and finally the wqicb control block.  All
 * steps are NULL-guarded and ql_free_phys() NULLs the handles it frees,
 * so this is safe on a partially-built ring (the alloc error paths in
 * ql_alloc_tx_resources() rely on that).
 */
static void
ql_free_tx_resources(struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/* Work queue (ob_mac_iocb array) DMA memory. */
	if (tx_ring->wq_dma.dma_handle != NULL) {
		ql_free_phys(&tx_ring->wq_dma.dma_handle,
		    &tx_ring->wq_dma.acc_handle);
		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
	}
	if (tx_ring->wq_desc != NULL) {
		tx_ring_desc = tx_ring->wq_desc;
		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
				if (tx_ring_desc->tx_dma_handle[j]) {
					/*
					 * The unbinding will happen in tx
					 * completion, here we just free the
					 * handles
					 */
					ddi_dma_free_handle(
					    &(tx_ring_desc->tx_dma_handle[j]));
					tx_ring_desc->tx_dma_handle[j] = NULL;
				}
			}
			if (tx_ring_desc->oal != NULL) {
				/*
				 * oal and copy_buffer both live inside the
				 * single oal_dma allocation freed below;
				 * clear the cached pointers/addresses first.
				 */
				tx_ring_desc->oal_dma_addr = 0;
				tx_ring_desc->oal = NULL;
				tx_ring_desc->copy_buffer = NULL;
				tx_ring_desc->copy_buffer_dma_addr = 0;

				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
				    &tx_ring_desc->oal_dma.acc_handle);
			}
		}
		kmem_free(tx_ring->wq_desc,
		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
		tx_ring->wq_desc = NULL;
	}
	/* free the wqicb struct */
	if (tx_ring->wqicb_dma.dma_handle) {
		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
		    &tx_ring->wqicb_dma.acc_handle);
		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
	}
}
3369bafec742SSukumar Swaminathan 
/*
 * Allocate work (request) queue memory and transmit
 * descriptors for this transmit ring
 *
 * Sets up: the 128-byte-aligned work queue DMA area, the zeroed software
 * descriptor array, a per-descriptor DMA area holding the oal entries plus
 * the tx copy buffer, QL_MAX_TX_DMA_HANDLES mapping handles per
 * descriptor, and the wqicb control block used to load the ring into the
 * chip.  On any failure, everything allocated so far is released via
 * ql_free_tx_resources() (the kmem_zalloc'd descriptor array guarantees
 * untouched handle slots are NULL there) and DDI_FAILURE is returned.
 */
static int
ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
{
	ddi_dma_cookie_t dma_cookie;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;
	uint32_t length;

	/* allocate dma buffers for obiocbs */
	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
	    &ql_desc_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &tx_ring->wq_dma.acc_handle,
	    (size_t)tx_ring->wq_size,	/* mem size */
	    (size_t)128, /* alignment:128 bytes boundary */
	    (caddr_t *)&tx_ring->wq_dma.vaddr,
	    &dma_cookie) != 0) {
		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}
	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;

	/*
	 * Zeroed so that on a partial failure ql_free_tx_resources() sees
	 * NULL handles/pointers in the untouched descriptors.
	 */
	tx_ring->wq_desc =
	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
	    KM_NOSLEEP);
	if (tx_ring->wq_desc == NULL) {
		goto err;
	} else {
		tx_ring_desc = tx_ring->wq_desc;
		/*
		 * Allocate a large enough structure to hold the following
		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
		 */
		length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
		    + QL_MAX_COPY_LENGTH;
		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {

			/* One DMA area per descriptor: oal + copy buffer. */
			if (ql_alloc_phys(qlge->dip,
			    &tx_ring_desc->oal_dma.dma_handle,
			    &ql_desc_acc_attr,
			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
			    &tx_ring_desc->oal_dma.acc_handle,
			    (size_t)length,	/* mem size */
			    (size_t)0, /* default alignment:8 bytes boundary */
			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
			    &dma_cookie) != 0) {
				bzero(&tx_ring_desc->oal_dma,
				    sizeof (tx_ring_desc->oal_dma));
				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
				    "oal alloc failed.",
				    __func__, qlge->instance);
				goto err;
			}

			/* Copy buffer sits right after the oal entries. */
			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
			tx_ring_desc->copy_buffer =
			    (caddr_t)((uint8_t *)tx_ring_desc->oal
			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
			tx_ring_desc->copy_buffer_dma_addr =
			    (tx_ring_desc->oal_dma_addr
			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));

			/* Allocate dma handles for transmit buffers */
			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
				if (ddi_dma_alloc_handle(qlge->dip,
				    &tx_mapping_dma_attr,
				    DDI_DMA_DONTWAIT,
				    0, &tx_ring_desc->tx_dma_handle[j])
				    != DDI_SUCCESS) {
					tx_ring_desc->tx_dma_handle[j] = NULL;
					cmn_err(CE_WARN,
					    "!%s: ddi_dma_alloc_handle: "
					    "tx_dma_handle "
					    "alloc failed", __func__);
					/*
					 * Free this descriptor's oal area
					 * here; ql_free_tx_resources() will
					 * still see oal != NULL but
					 * ql_free_phys() ignores the
					 * already-NULLed handle.
					 */
					ql_free_phys(
					    &tx_ring_desc->oal_dma.dma_handle,
					    &tx_ring_desc->oal_dma.acc_handle);
					goto err;
				}
			}
		}
	}
	/* alloc a wqicb control block to load this tx ring to hw */
	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
	    &ql_desc_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &tx_ring->wqicb_dma.acc_handle,
	    (size_t)sizeof (struct wqicb_t),	/* mem size */
	    (size_t)0, /* alignment:128 bytes boundary */
	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
	    &dma_cookie) != 0) {
		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
		    __func__, qlge->instance);
		goto err;
	}
	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;

	return (DDI_SUCCESS);

err:
	/* Releases everything allocated above, in reverse order. */
	ql_free_tx_resources(tx_ring);
	return (DDI_FAILURE);
}
3482bafec742SSukumar Swaminathan 
3483bafec742SSukumar Swaminathan /*
3484bafec742SSukumar Swaminathan  * Free one rx ring resources
3485bafec742SSukumar Swaminathan  */
3486bafec742SSukumar Swaminathan static void
ql_free_rx_resources(struct rx_ring * rx_ring)3487bafec742SSukumar Swaminathan ql_free_rx_resources(struct rx_ring *rx_ring)
3488bafec742SSukumar Swaminathan {
3489bafec742SSukumar Swaminathan 	/* Free the small buffer queue. */
3490bafec742SSukumar Swaminathan 	if (rx_ring->sbq_dma.dma_handle) {
3491bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3492bafec742SSukumar Swaminathan 		    &rx_ring->sbq_dma.acc_handle);
3493bafec742SSukumar Swaminathan 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3494bafec742SSukumar Swaminathan 	}
3495bafec742SSukumar Swaminathan 
3496bafec742SSukumar Swaminathan 	/* Free the small buffer queue control blocks. */
3497cddcb3daSSukumar Swaminathan 	if (rx_ring->sbq_desc != NULL) {
3498cddcb3daSSukumar Swaminathan 		kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3499cddcb3daSSukumar Swaminathan 		    sizeof (struct bq_desc));
3500cddcb3daSSukumar Swaminathan 		rx_ring->sbq_desc = NULL;
3501cddcb3daSSukumar Swaminathan 	}
3502bafec742SSukumar Swaminathan 
3503bafec742SSukumar Swaminathan 	/* Free the large buffer queue. */
3504bafec742SSukumar Swaminathan 	if (rx_ring->lbq_dma.dma_handle) {
3505bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3506bafec742SSukumar Swaminathan 		    &rx_ring->lbq_dma.acc_handle);
3507bafec742SSukumar Swaminathan 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3508bafec742SSukumar Swaminathan 	}
3509bafec742SSukumar Swaminathan 
3510bafec742SSukumar Swaminathan 	/* Free the large buffer queue control blocks. */
3511cddcb3daSSukumar Swaminathan 	if (rx_ring->lbq_desc != NULL) {
3512cddcb3daSSukumar Swaminathan 		kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3513cddcb3daSSukumar Swaminathan 		    sizeof (struct bq_desc));
3514cddcb3daSSukumar Swaminathan 		rx_ring->lbq_desc = NULL;
3515cddcb3daSSukumar Swaminathan 	}
3516bafec742SSukumar Swaminathan 
3517bafec742SSukumar Swaminathan 	/* Free cqicb struct */
3518bafec742SSukumar Swaminathan 	if (rx_ring->cqicb_dma.dma_handle) {
3519bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3520bafec742SSukumar Swaminathan 		    &rx_ring->cqicb_dma.acc_handle);
3521bafec742SSukumar Swaminathan 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3522bafec742SSukumar Swaminathan 	}
3523bafec742SSukumar Swaminathan 	/* Free the rx queue. */
3524bafec742SSukumar Swaminathan 	if (rx_ring->cq_dma.dma_handle) {
3525bafec742SSukumar Swaminathan 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3526bafec742SSukumar Swaminathan 		    &rx_ring->cq_dma.acc_handle);
3527bafec742SSukumar Swaminathan 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3528bafec742SSukumar Swaminathan 	}
3529bafec742SSukumar Swaminathan }
3530bafec742SSukumar Swaminathan 
/*
 * Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 *
 * Allocates the 128-byte-aligned completion queue, then (only when the
 * corresponding *_len is nonzero) the small and large buffer queues with
 * their control-block arrays, and finally the cqicb control block used
 * to load the ring into the chip.  Any failure jumps to err_mem, which
 * frees whatever was built via ql_free_rx_resources().
 */
static int
ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
{
	ddi_dma_cookie_t dma_cookie;

	/* Completion (response) queue. */
	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
	    &ql_desc_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rx_ring->cq_dma.acc_handle,
	    (size_t)rx_ring->cq_size,  /* mem size */
	    (size_t)128, /* alignment:128 bytes boundary */
	    (caddr_t *)&rx_ring->cq_dma.vaddr,
	    &dma_cookie) != 0)	{
		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
		    __func__, qlge->instance);
		return (DDI_FAILURE);
	}
	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;

	if (rx_ring->sbq_len != 0) {
		/*
		 * Allocate small buffer queue.
		 */
		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
		    &ql_desc_acc_attr,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    &rx_ring->sbq_dma.acc_handle,
		    (size_t)rx_ring->sbq_size,  /* mem size */
		    (size_t)128, /* alignment:128 bytes boundary */
		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
		    &dma_cookie) != 0) {
			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
			cmn_err(CE_WARN,
			    "%s(%d): small buffer queue allocation failed.",
			    __func__, qlge->instance);
			goto err_mem;
		}
		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq_desc =
		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
		    KM_NOSLEEP);
		if (rx_ring->sbq_desc == NULL) {
			cmn_err(CE_WARN,
			    "sbq control block allocation failed.");
			goto err_mem;
		}

		ql_init_sbq_ring(rx_ring);
	}

	if (rx_ring->lbq_len != 0) {
		/*
		 * Allocate large buffer queue.
		 */
		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
		    &ql_desc_acc_attr,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    &rx_ring->lbq_dma.acc_handle,
		    (size_t)rx_ring->lbq_size,  /* mem size */
		    (size_t)128, /* alignment:128 bytes boundary */
		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
		    &dma_cookie) != 0) {
			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
			    __func__, qlge->instance);
			goto err_mem;
		}
		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;

		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq_desc =
		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
		    KM_NOSLEEP);
		if (rx_ring->lbq_desc == NULL) {
			cmn_err(CE_WARN,
			    "Large buffer queue control block allocation "
			    "failed.");
			goto err_mem;
		}
		ql_init_lbq_ring(rx_ring);
	}

	/* cqicb control block used to load this ring into the chip. */
	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
	    &ql_desc_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &rx_ring->cqicb_dma.acc_handle,
	    (size_t)sizeof (struct cqicb_t),  /* mem size */
	    (size_t)0, /* alignment:128 bytes boundary */
	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
	    &dma_cookie) != 0) {
		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
		    __func__, qlge->instance);
		goto err_mem;
	}
	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;

	return (DDI_SUCCESS);

err_mem:
	/* NULL-guarded: frees only what was successfully allocated. */
	ql_free_rx_resources(rx_ring);
	return (DDI_FAILURE);
}
3645bafec742SSukumar Swaminathan 
3646bafec742SSukumar Swaminathan /*
3647bafec742SSukumar Swaminathan  * Frees tx/rx queues memory resources
3648bafec742SSukumar Swaminathan  */
3649bafec742SSukumar Swaminathan static void
ql_free_mem_resources(qlge_t * qlge)3650bafec742SSukumar Swaminathan ql_free_mem_resources(qlge_t *qlge)
3651bafec742SSukumar Swaminathan {
3652bafec742SSukumar Swaminathan 	int i;
3653bafec742SSukumar Swaminathan 
3654bafec742SSukumar Swaminathan 	if (qlge->ricb_dma.dma_handle) {
3655bafec742SSukumar Swaminathan 		/* free the ricb struct */
3656bafec742SSukumar Swaminathan 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3657bafec742SSukumar Swaminathan 		    &qlge->ricb_dma.acc_handle);
3658bafec742SSukumar Swaminathan 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3659bafec742SSukumar Swaminathan 	}
3660bafec742SSukumar Swaminathan 
3661bafec742SSukumar Swaminathan 	ql_free_rx_buffers(qlge);
3662bafec742SSukumar Swaminathan 
3663bafec742SSukumar Swaminathan 	ql_free_ioctl_dma_buf(qlge);
3664bafec742SSukumar Swaminathan 
3665bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++)
3666bafec742SSukumar Swaminathan 		ql_free_tx_resources(&qlge->tx_ring[i]);
3667bafec742SSukumar Swaminathan 
3668bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++)
3669bafec742SSukumar Swaminathan 		ql_free_rx_resources(&qlge->rx_ring[i]);
3670bafec742SSukumar Swaminathan 
3671bafec742SSukumar Swaminathan 	ql_free_shadow_space(qlge);
3672bafec742SSukumar Swaminathan }
3673bafec742SSukumar Swaminathan 
/*
 * Allocate buffer queues, large buffers and small buffers etc
 *
 * This API is called in the gld_attach member function. It is called
 * only once.  Later reset,reboot should not re-allocate all rings and
 * buffers.
 *
 * Order: shadow register space, per-ring rx resources, per-ring tx
 * resources, the ioctl DMA buffer, the rx data buffers (recorded in
 * qlge->sequence), and finally the ricb control block.  Any failure
 * tears everything down via ql_free_mem_resources() and returns
 * DDI_FAILURE.
 */
static int
ql_alloc_mem_resources(qlge_t *qlge)
{
	int i;
	ddi_dma_cookie_t dma_cookie;

	/* Allocate space for our shadow registers */
	if (ql_alloc_shadow_space(qlge))
		return (DDI_FAILURE);

	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
			cmn_err(CE_WARN, "RX resource allocation failed.");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
			cmn_err(CE_WARN, "Tx resource allocation failed.");
			goto err_mem;
		}
	}

	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
		goto err_mem;
	}

	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
		    __func__, qlge->instance);
		goto err_mem;
	}

	/* Record that rx buffers exist so detach knows to free them. */
	qlge->sequence |= INIT_ALLOC_RX_BUF;

	/* ricb control block used to load the RSS table into the chip. */
	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
	    &ql_desc_acc_attr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    &qlge->ricb_dma.acc_handle,
	    (size_t)sizeof (struct ricb),  /* mem size */
	    (size_t)0, /* alignment:128 bytes boundary */
	    (caddr_t *)&qlge->ricb_dma.vaddr,
	    &dma_cookie) != 0) {
		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
		    __func__, qlge->instance);
		goto err_mem;
	}
	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;

	return (DDI_SUCCESS);

err_mem:
	/* Frees everything allocated so far; all paths are NULL-guarded. */
	ql_free_mem_resources(qlge);
	return (DDI_FAILURE);
}
3738bafec742SSukumar Swaminathan 
3739bafec742SSukumar Swaminathan 
3740bafec742SSukumar Swaminathan /*
3741bafec742SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3742bafec742SSukumar Swaminathan  */
3743bafec742SSukumar Swaminathan 
3744bafec742SSukumar Swaminathan static int
ql_alloc_phys_rbuf(dev_info_t * dip,ddi_dma_handle_t * dma_handle,ddi_device_acc_attr_t * device_acc_attr,uint_t dma_flags,ddi_acc_handle_t * acc_handle,size_t size,size_t alignment,caddr_t * vaddr,ddi_dma_cookie_t * dma_cookie)3745accf27a5SSukumar Swaminathan ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3746bafec742SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3747bafec742SSukumar Swaminathan     uint_t dma_flags,
3748bafec742SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3749bafec742SSukumar Swaminathan     size_t size,
3750bafec742SSukumar Swaminathan     size_t alignment,
3751bafec742SSukumar Swaminathan     caddr_t *vaddr,
3752bafec742SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3753bafec742SSukumar Swaminathan {
3754bafec742SSukumar Swaminathan 	size_t rlen;
3755bafec742SSukumar Swaminathan 	uint_t cnt;
3756bafec742SSukumar Swaminathan 
3757bafec742SSukumar Swaminathan 	/*
3758bafec742SSukumar Swaminathan 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3759bafec742SSukumar Swaminathan 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3760bafec742SSukumar Swaminathan 	 * to make sure buffer has enough room for overrun.
3761bafec742SSukumar Swaminathan 	 */
3762bafec742SSukumar Swaminathan 	if (size & 7) {
3763bafec742SSukumar Swaminathan 		size += 8 - (size & 7);
3764bafec742SSukumar Swaminathan 	}
3765bafec742SSukumar Swaminathan 
3766bafec742SSukumar Swaminathan 	/* Adjust the alignment if requested */
3767bafec742SSukumar Swaminathan 	if (alignment) {
3768bafec742SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3769bafec742SSukumar Swaminathan 	}
3770bafec742SSukumar Swaminathan 
3771bafec742SSukumar Swaminathan 	/*
3772bafec742SSukumar Swaminathan 	 * Allocate DMA handle
3773bafec742SSukumar Swaminathan 	 */
3774accf27a5SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3775bafec742SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3776bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3777bafec742SSukumar Swaminathan 		    __func__);
3778cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3779bafec742SSukumar Swaminathan 		return (QL_ERROR);
3780bafec742SSukumar Swaminathan 	}
3781bafec742SSukumar Swaminathan 	/*
3782bafec742SSukumar Swaminathan 	 * Allocate DMA memory
3783bafec742SSukumar Swaminathan 	 */
3784bafec742SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3785accf27a5SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3786accf27a5SSukumar Swaminathan 	    DDI_DMA_DONTWAIT,
3787bafec742SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3788accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3789bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3790cddcb3daSSukumar Swaminathan 		*acc_handle = NULL;
3791cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3792accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3793bafec742SSukumar Swaminathan 	}
3794accf27a5SSukumar Swaminathan 
3795accf27a5SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3796accf27a5SSukumar Swaminathan 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3797accf27a5SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3798accf27a5SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3799accf27a5SSukumar Swaminathan 
3800accf27a5SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3801accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3802accf27a5SSukumar Swaminathan 		    __func__);
3803cddcb3daSSukumar Swaminathan 		*acc_handle = NULL;
3804cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3805accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3806accf27a5SSukumar Swaminathan 	}
3807accf27a5SSukumar Swaminathan 
3808accf27a5SSukumar Swaminathan 	if (cnt != 1) {
3809accf27a5SSukumar Swaminathan 
3810accf27a5SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3811accf27a5SSukumar Swaminathan 
3812accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3813accf27a5SSukumar Swaminathan 		    __func__);
3814accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3815accf27a5SSukumar Swaminathan 	}
3816accf27a5SSukumar Swaminathan 
3817accf27a5SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3818accf27a5SSukumar Swaminathan 
3819accf27a5SSukumar Swaminathan 	return (0);
3820accf27a5SSukumar Swaminathan }
3821accf27a5SSukumar Swaminathan 
3822accf27a5SSukumar Swaminathan /*
3823accf27a5SSukumar Swaminathan  * Function used to allocate physical memory and zero it.
3824accf27a5SSukumar Swaminathan  */
3825accf27a5SSukumar Swaminathan static int
ql_alloc_phys(dev_info_t * dip,ddi_dma_handle_t * dma_handle,ddi_device_acc_attr_t * device_acc_attr,uint_t dma_flags,ddi_acc_handle_t * acc_handle,size_t size,size_t alignment,caddr_t * vaddr,ddi_dma_cookie_t * dma_cookie)3826accf27a5SSukumar Swaminathan ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3827accf27a5SSukumar Swaminathan     ddi_device_acc_attr_t *device_acc_attr,
3828accf27a5SSukumar Swaminathan     uint_t dma_flags,
3829accf27a5SSukumar Swaminathan     ddi_acc_handle_t *acc_handle,
3830accf27a5SSukumar Swaminathan     size_t size,
3831accf27a5SSukumar Swaminathan     size_t alignment,
3832accf27a5SSukumar Swaminathan     caddr_t *vaddr,
3833accf27a5SSukumar Swaminathan     ddi_dma_cookie_t *dma_cookie)
3834accf27a5SSukumar Swaminathan {
3835accf27a5SSukumar Swaminathan 	size_t rlen;
3836accf27a5SSukumar Swaminathan 	uint_t cnt;
3837accf27a5SSukumar Swaminathan 
3838accf27a5SSukumar Swaminathan 	/*
3839accf27a5SSukumar Swaminathan 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
3840accf27a5SSukumar Swaminathan 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
3841accf27a5SSukumar Swaminathan 	 * to make sure buffer has enough room for overrun.
3842accf27a5SSukumar Swaminathan 	 */
3843accf27a5SSukumar Swaminathan 	if (size & 7) {
3844accf27a5SSukumar Swaminathan 		size += 8 - (size & 7);
3845accf27a5SSukumar Swaminathan 	}
3846accf27a5SSukumar Swaminathan 
3847accf27a5SSukumar Swaminathan 	/* Adjust the alignment if requested */
3848accf27a5SSukumar Swaminathan 	if (alignment) {
3849accf27a5SSukumar Swaminathan 		dma_attr.dma_attr_align = alignment;
3850accf27a5SSukumar Swaminathan 	}
3851accf27a5SSukumar Swaminathan 
3852accf27a5SSukumar Swaminathan 	/*
3853accf27a5SSukumar Swaminathan 	 * Allocate DMA handle
3854accf27a5SSukumar Swaminathan 	 */
3855accf27a5SSukumar Swaminathan 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3856accf27a5SSukumar Swaminathan 	    dma_handle) != DDI_SUCCESS) {
3857accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3858accf27a5SSukumar Swaminathan 		    __func__);
3859cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3860accf27a5SSukumar Swaminathan 		return (QL_ERROR);
3861accf27a5SSukumar Swaminathan 	}
3862accf27a5SSukumar Swaminathan 	/*
3863accf27a5SSukumar Swaminathan 	 * Allocate DMA memory
3864accf27a5SSukumar Swaminathan 	 */
3865accf27a5SSukumar Swaminathan 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3866accf27a5SSukumar Swaminathan 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3867accf27a5SSukumar Swaminathan 	    DDI_DMA_DONTWAIT,
3868accf27a5SSukumar Swaminathan 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3869accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3870bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3871cddcb3daSSukumar Swaminathan 		*acc_handle = NULL;
3872cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3873bafec742SSukumar Swaminathan 		return (QL_ERROR);
3874bafec742SSukumar Swaminathan 	}
3875bafec742SSukumar Swaminathan 
3876bafec742SSukumar Swaminathan 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3877accf27a5SSukumar Swaminathan 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3878bafec742SSukumar Swaminathan 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3879bafec742SSukumar Swaminathan 		ddi_dma_mem_free(acc_handle);
3880bafec742SSukumar Swaminathan 		ddi_dma_free_handle(dma_handle);
3881bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3882bafec742SSukumar Swaminathan 		    __func__);
3883cddcb3daSSukumar Swaminathan 		*acc_handle = NULL;
3884cddcb3daSSukumar Swaminathan 		*dma_handle = NULL;
3885bafec742SSukumar Swaminathan 		return (QL_ERROR);
3886bafec742SSukumar Swaminathan 	}
3887bafec742SSukumar Swaminathan 
3888bafec742SSukumar Swaminathan 	if (cnt != 1) {
3889bafec742SSukumar Swaminathan 
3890bafec742SSukumar Swaminathan 		ql_free_phys(dma_handle, acc_handle);
3891bafec742SSukumar Swaminathan 
3892bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3893bafec742SSukumar Swaminathan 		    __func__);
3894bafec742SSukumar Swaminathan 		return (QL_ERROR);
3895bafec742SSukumar Swaminathan 	}
3896bafec742SSukumar Swaminathan 
3897bafec742SSukumar Swaminathan 	bzero((caddr_t)*vaddr, rlen);
3898bafec742SSukumar Swaminathan 
3899bafec742SSukumar Swaminathan 	return (0);
3900bafec742SSukumar Swaminathan }
3901bafec742SSukumar Swaminathan 
3902bafec742SSukumar Swaminathan /*
3903bafec742SSukumar Swaminathan  * Add interrupt handlers based on the interrupt type.
3904bafec742SSukumar Swaminathan  * Before adding the interrupt handlers, the interrupt vectors should
3905bafec742SSukumar Swaminathan  * have been allocated, and the rx/tx rings have also been allocated.
3906bafec742SSukumar Swaminathan  */
3907bafec742SSukumar Swaminathan static int
ql_add_intr_handlers(qlge_t * qlge)3908bafec742SSukumar Swaminathan ql_add_intr_handlers(qlge_t *qlge)
3909bafec742SSukumar Swaminathan {
3910bafec742SSukumar Swaminathan 	int vector = 0;
3911bafec742SSukumar Swaminathan 	int rc, i;
3912bafec742SSukumar Swaminathan 	uint32_t value;
3913bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3914bafec742SSukumar Swaminathan 
3915bafec742SSukumar Swaminathan 	switch (qlge->intr_type) {
3916bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
3917bafec742SSukumar Swaminathan 		/*
3918bafec742SSukumar Swaminathan 		 * Add interrupt handler for rx and tx rings: vector[0 -
3919bafec742SSukumar Swaminathan 		 * (qlge->intr_cnt -1)].
3920bafec742SSukumar Swaminathan 		 */
3921bafec742SSukumar Swaminathan 		value = 0;
3922bafec742SSukumar Swaminathan 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3923bafec742SSukumar Swaminathan 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3924bafec742SSukumar Swaminathan 
3925bafec742SSukumar Swaminathan 			/*
3926bafec742SSukumar Swaminathan 			 * associate interrupt vector with interrupt handler
3927bafec742SSukumar Swaminathan 			 */
3928bafec742SSukumar Swaminathan 			rc = ddi_intr_add_handler(qlge->htable[vector],
3929bafec742SSukumar Swaminathan 			    (ddi_intr_handler_t *)intr_ctx->handler,
3930bafec742SSukumar Swaminathan 			    (void *)&qlge->rx_ring[vector], NULL);
3931bafec742SSukumar Swaminathan 
3932accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3933accf27a5SSukumar Swaminathan 			    vector, &qlge->rx_ring[vector]));
3934bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
3935bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT,
3936bafec742SSukumar Swaminathan 				    ("Add rx interrupt handler failed. "
3937bafec742SSukumar Swaminathan 				    "return: %d, vector: %d", rc, vector));
3938bafec742SSukumar Swaminathan 				for (vector--; vector >= 0; vector--) {
3939bafec742SSukumar Swaminathan 					(void) ddi_intr_remove_handler(
3940bafec742SSukumar Swaminathan 					    qlge->htable[vector]);
3941bafec742SSukumar Swaminathan 				}
3942bafec742SSukumar Swaminathan 				return (DDI_FAILURE);
3943bafec742SSukumar Swaminathan 			}
3944bafec742SSukumar Swaminathan 			intr_ctx++;
3945bafec742SSukumar Swaminathan 		}
3946bafec742SSukumar Swaminathan 		break;
3947bafec742SSukumar Swaminathan 
3948bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
3949bafec742SSukumar Swaminathan 		/*
3950bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3951bafec742SSukumar Swaminathan 		 */
3952bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3953bafec742SSukumar Swaminathan 
3954bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3955bafec742SSukumar Swaminathan 		    ql_isr,
3956bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3957bafec742SSukumar Swaminathan 
3958bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3959bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3960bafec742SSukumar Swaminathan 			    ("Add MSI interrupt handler failed: %d\n", rc));
3961bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3962bafec742SSukumar Swaminathan 		}
3963bafec742SSukumar Swaminathan 		break;
3964bafec742SSukumar Swaminathan 
3965bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
3966bafec742SSukumar Swaminathan 		/*
3967bafec742SSukumar Swaminathan 		 * Add interrupt handlers for the only vector
3968bafec742SSukumar Swaminathan 		 */
3969bafec742SSukumar Swaminathan 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3970bafec742SSukumar Swaminathan 
3971bafec742SSukumar Swaminathan 		rc = ddi_intr_add_handler(qlge->htable[vector],
3972bafec742SSukumar Swaminathan 		    ql_isr,
3973bafec742SSukumar Swaminathan 		    (caddr_t)&qlge->rx_ring[0], NULL);
3974bafec742SSukumar Swaminathan 
3975bafec742SSukumar Swaminathan 		if (rc != DDI_SUCCESS) {
3976bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT,
3977bafec742SSukumar Swaminathan 			    ("Add legacy interrupt handler failed: %d\n", rc));
3978bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
3979bafec742SSukumar Swaminathan 		}
3980bafec742SSukumar Swaminathan 		break;
3981bafec742SSukumar Swaminathan 
3982bafec742SSukumar Swaminathan 	default:
3983bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
3984bafec742SSukumar Swaminathan 	}
3985bafec742SSukumar Swaminathan 
3986bafec742SSukumar Swaminathan 	/* Enable interrupts */
3987bafec742SSukumar Swaminathan 	/* Block enable */
3988bafec742SSukumar Swaminathan 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3989bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3990bafec742SSukumar Swaminathan 		    qlge->intr_cnt));
3991bafec742SSukumar Swaminathan 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3992bafec742SSukumar Swaminathan 	} else { /* Non block enable */
3993bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++) {
3994accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3995bafec742SSukumar Swaminathan 			    "handle 0x%x\n", i, qlge->htable[i]));
3996bafec742SSukumar Swaminathan 			(void) ddi_intr_enable(qlge->htable[i]);
3997bafec742SSukumar Swaminathan 		}
3998bafec742SSukumar Swaminathan 	}
3999bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ENABLED;
4000bafec742SSukumar Swaminathan 
4001bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
4002bafec742SSukumar Swaminathan }
4003bafec742SSukumar Swaminathan 
4004bafec742SSukumar Swaminathan /*
4005bafec742SSukumar Swaminathan  * Here we build the intr_ctx structures based on
4006bafec742SSukumar Swaminathan  * our rx_ring count and intr vector count.
4007bafec742SSukumar Swaminathan  * The intr_ctx structure is used to hook each vector
4008bafec742SSukumar Swaminathan  * to possibly different handlers.
4009bafec742SSukumar Swaminathan  */
4010bafec742SSukumar Swaminathan static void
ql_resolve_queues_to_irqs(qlge_t * qlge)4011bafec742SSukumar Swaminathan ql_resolve_queues_to_irqs(qlge_t *qlge)
4012bafec742SSukumar Swaminathan {
4013bafec742SSukumar Swaminathan 	int i = 0;
4014bafec742SSukumar Swaminathan 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
4015bafec742SSukumar Swaminathan 
4016bafec742SSukumar Swaminathan 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
4017bafec742SSukumar Swaminathan 		/*
4018bafec742SSukumar Swaminathan 		 * Each rx_ring has its own intr_ctx since we
4019bafec742SSukumar Swaminathan 		 * have separate vectors for each queue.
4020bafec742SSukumar Swaminathan 		 * This only true when MSI-X is enabled.
4021bafec742SSukumar Swaminathan 		 */
4022bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
4023bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = i;
4024bafec742SSukumar Swaminathan 			intr_ctx->intr = i;
4025bafec742SSukumar Swaminathan 			intr_ctx->qlge = qlge;
4026bafec742SSukumar Swaminathan 
4027bafec742SSukumar Swaminathan 			/*
4028bafec742SSukumar Swaminathan 			 * We set up each vectors enable/disable/read bits so
4029bafec742SSukumar Swaminathan 			 * there's no bit/mask calculations in critical path.
4030bafec742SSukumar Swaminathan 			 */
4031bafec742SSukumar Swaminathan 			intr_ctx->intr_en_mask =
4032bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4033bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4034bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
4035bafec742SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
4036bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4037bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4038bafec742SSukumar Swaminathan 			    INTR_EN_IHD | i;
4039bafec742SSukumar Swaminathan 			intr_ctx->intr_read_mask =
4040bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4041bafec742SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4042bafec742SSukumar Swaminathan 			    | i;
4043bafec742SSukumar Swaminathan 
4044bafec742SSukumar Swaminathan 			if (i == 0) {
4045bafec742SSukumar Swaminathan 				/*
4046bafec742SSukumar Swaminathan 				 * Default queue handles bcast/mcast plus
4047bafec742SSukumar Swaminathan 				 * async events.
4048bafec742SSukumar Swaminathan 				 */
4049bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_isr;
4050bafec742SSukumar Swaminathan 			} else if (qlge->rx_ring[i].type == TX_Q) {
4051bafec742SSukumar Swaminathan 				/*
4052bafec742SSukumar Swaminathan 				 * Outbound queue is for outbound completions
4053bafec742SSukumar Swaminathan 				 * only.
4054bafec742SSukumar Swaminathan 				 */
4055accf27a5SSukumar Swaminathan 				if (qlge->isr_stride)
4056accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_isr;
4057accf27a5SSukumar Swaminathan 				else
4058accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_tx_isr;
4059accf27a5SSukumar Swaminathan 			} else {
4060accf27a5SSukumar Swaminathan 				/*
4061accf27a5SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
4062accf27a5SSukumar Swaminathan 				 */
4063accf27a5SSukumar Swaminathan 				if (qlge->isr_stride)
4064accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_isr;
4065accf27a5SSukumar Swaminathan 				else
4066accf27a5SSukumar Swaminathan 					intr_ctx->handler = ql_msix_rx_isr;
4067accf27a5SSukumar Swaminathan 			}
4068accf27a5SSukumar Swaminathan 		}
4069accf27a5SSukumar Swaminathan 		i = qlge->intr_cnt;
4070accf27a5SSukumar Swaminathan 		for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4071accf27a5SSukumar Swaminathan 			int iv = i - qlge->isr_stride;
4072accf27a5SSukumar Swaminathan 			qlge->rx_ring[i].irq = iv;
4073accf27a5SSukumar Swaminathan 			intr_ctx->intr = iv;
4074accf27a5SSukumar Swaminathan 			intr_ctx->qlge = qlge;
4075accf27a5SSukumar Swaminathan 
4076accf27a5SSukumar Swaminathan 			/*
4077accf27a5SSukumar Swaminathan 			 * We set up each vectors enable/disable/read bits so
4078accf27a5SSukumar Swaminathan 			 * there's no bit/mask calculations in critical path.
4079accf27a5SSukumar Swaminathan 			 */
4080accf27a5SSukumar Swaminathan 			intr_ctx->intr_en_mask =
4081accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4082accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4083accf27a5SSukumar Swaminathan 			    INTR_EN_IHD | iv;
4084accf27a5SSukumar Swaminathan 			intr_ctx->intr_dis_mask =
4085accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4086accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4087accf27a5SSukumar Swaminathan 			    INTR_EN_IHD | iv;
4088accf27a5SSukumar Swaminathan 			intr_ctx->intr_read_mask =
4089accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4090accf27a5SSukumar Swaminathan 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4091accf27a5SSukumar Swaminathan 			    | iv;
4092accf27a5SSukumar Swaminathan 
4093accf27a5SSukumar Swaminathan 			if (qlge->rx_ring[i].type == TX_Q) {
4094accf27a5SSukumar Swaminathan 				/*
4095accf27a5SSukumar Swaminathan 				 * Outbound queue is for outbound completions
4096accf27a5SSukumar Swaminathan 				 * only.
4097accf27a5SSukumar Swaminathan 				 */
4098accf27a5SSukumar Swaminathan 				intr_ctx->handler = ql_msix_isr;
4099bafec742SSukumar Swaminathan 			} else {
4100bafec742SSukumar Swaminathan 				/*
4101bafec742SSukumar Swaminathan 				 * Inbound queues handle unicast frames only.
4102bafec742SSukumar Swaminathan 				 */
4103bafec742SSukumar Swaminathan 				intr_ctx->handler = ql_msix_rx_isr;
4104bafec742SSukumar Swaminathan 			}
4105bafec742SSukumar Swaminathan 		}
4106bafec742SSukumar Swaminathan 	} else {
4107bafec742SSukumar Swaminathan 		/*
4108bafec742SSukumar Swaminathan 		 * All rx_rings use the same intr_ctx since
4109bafec742SSukumar Swaminathan 		 * there is only one vector.
4110bafec742SSukumar Swaminathan 		 */
4111bafec742SSukumar Swaminathan 		intr_ctx->intr = 0;
4112bafec742SSukumar Swaminathan 		intr_ctx->qlge = qlge;
4113bafec742SSukumar Swaminathan 		/*
4114bafec742SSukumar Swaminathan 		 * We set up each vectors enable/disable/read bits so
4115bafec742SSukumar Swaminathan 		 * there's no bit/mask calculations in the critical path.
4116bafec742SSukumar Swaminathan 		 */
4117bafec742SSukumar Swaminathan 		intr_ctx->intr_en_mask =
4118bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4119bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_ENABLE;
4120bafec742SSukumar Swaminathan 		intr_ctx->intr_dis_mask =
4121bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4122bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_DISABLE;
4123bafec742SSukumar Swaminathan 		intr_ctx->intr_read_mask =
4124bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4125bafec742SSukumar Swaminathan 		    INTR_EN_TYPE_READ;
4126bafec742SSukumar Swaminathan 		/*
4127bafec742SSukumar Swaminathan 		 * Single interrupt means one handler for all rings.
4128bafec742SSukumar Swaminathan 		 */
4129bafec742SSukumar Swaminathan 		intr_ctx->handler = ql_isr;
4130bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++)
4131bafec742SSukumar Swaminathan 			qlge->rx_ring[i].irq = 0;
4132bafec742SSukumar Swaminathan 	}
4133bafec742SSukumar Swaminathan }
4134bafec742SSukumar Swaminathan 
4135bafec742SSukumar Swaminathan 
4136bafec742SSukumar Swaminathan /*
4137bafec742SSukumar Swaminathan  * Free allocated interrupts.
4138bafec742SSukumar Swaminathan  */
4139bafec742SSukumar Swaminathan static void
ql_free_irq_vectors(qlge_t * qlge)4140bafec742SSukumar Swaminathan ql_free_irq_vectors(qlge_t *qlge)
4141bafec742SSukumar Swaminathan {
4142bafec742SSukumar Swaminathan 	int i;
4143bafec742SSukumar Swaminathan 	int rc;
4144bafec742SSukumar Swaminathan 
4145bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ENABLED) {
4146bafec742SSukumar Swaminathan 		/* Disable all interrupts */
4147bafec742SSukumar Swaminathan 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4148bafec742SSukumar Swaminathan 			/* Call ddi_intr_block_disable() */
4149bafec742SSukumar Swaminathan 			(void) ddi_intr_block_disable(qlge->htable,
4150bafec742SSukumar Swaminathan 			    qlge->intr_cnt);
4151bafec742SSukumar Swaminathan 		} else {
4152bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->intr_cnt; i++) {
4153bafec742SSukumar Swaminathan 				(void) ddi_intr_disable(qlge->htable[i]);
4154bafec742SSukumar Swaminathan 			}
4155bafec742SSukumar Swaminathan 		}
4156bafec742SSukumar Swaminathan 
4157bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ENABLED;
4158bafec742SSukumar Swaminathan 	}
4159bafec742SSukumar Swaminathan 
4160bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->intr_cnt; i++) {
4161bafec742SSukumar Swaminathan 
4162bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_ADD_INTERRUPT)
4163bafec742SSukumar Swaminathan 			(void) ddi_intr_remove_handler(qlge->htable[i]);
4164bafec742SSukumar Swaminathan 
4165bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_INTR_ALLOC) {
4166bafec742SSukumar Swaminathan 			rc = ddi_intr_free(qlge->htable[i]);
4167bafec742SSukumar Swaminathan 			if (rc != DDI_SUCCESS) {
4168bafec742SSukumar Swaminathan 				/* EMPTY */
4169bafec742SSukumar Swaminathan 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4170bafec742SSukumar Swaminathan 				    rc));
4171bafec742SSukumar Swaminathan 			}
4172bafec742SSukumar Swaminathan 		}
4173bafec742SSukumar Swaminathan 	}
4174bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ALLOC)
4175bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_INTR_ALLOC;
4176bafec742SSukumar Swaminathan 
4177bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_INTERRUPT)
4178bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4179bafec742SSukumar Swaminathan 
4180bafec742SSukumar Swaminathan 	if (qlge->htable) {
4181bafec742SSukumar Swaminathan 		kmem_free(qlge->htable, qlge->intr_size);
4182bafec742SSukumar Swaminathan 		qlge->htable = NULL;
4183bafec742SSukumar Swaminathan 	}
4184bafec742SSukumar Swaminathan }
4185bafec742SSukumar Swaminathan 
4186bafec742SSukumar Swaminathan /*
4187bafec742SSukumar Swaminathan  * Allocate interrupt vectors
4188bafec742SSukumar Swaminathan  * For legacy and MSI, only 1 handle is needed.
4189bafec742SSukumar Swaminathan  * For MSI-X, if fewer than 2 vectors are available, return failure.
4190bafec742SSukumar Swaminathan  * Upon success, this maps the vectors to rx and tx rings for
4191bafec742SSukumar Swaminathan  * interrupts.
4192bafec742SSukumar Swaminathan  */
4193bafec742SSukumar Swaminathan static int
ql_request_irq_vectors(qlge_t * qlge,int intr_type)4194bafec742SSukumar Swaminathan ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4195bafec742SSukumar Swaminathan {
4196bafec742SSukumar Swaminathan 	dev_info_t *devinfo;
4197bafec742SSukumar Swaminathan 	uint32_t request, orig;
4198bafec742SSukumar Swaminathan 	int count, avail, actual;
4199bafec742SSukumar Swaminathan 	int minimum;
4200bafec742SSukumar Swaminathan 	int rc;
4201bafec742SSukumar Swaminathan 
4202bafec742SSukumar Swaminathan 	devinfo = qlge->dip;
4203bafec742SSukumar Swaminathan 
4204bafec742SSukumar Swaminathan 	switch (intr_type) {
4205bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_FIXED:
4206bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 legacy interrupt handle */
4207bafec742SSukumar Swaminathan 		minimum = 1;
4208bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4209bafec742SSukumar Swaminathan 		break;
4210bafec742SSukumar Swaminathan 
4211bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSI:
4212bafec742SSukumar Swaminathan 		request = 1;	/* Request 1 MSI interrupt handle */
4213bafec742SSukumar Swaminathan 		minimum = 1;
4214bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4215bafec742SSukumar Swaminathan 		break;
4216bafec742SSukumar Swaminathan 
4217bafec742SSukumar Swaminathan 	case DDI_INTR_TYPE_MSIX:
4218bafec742SSukumar Swaminathan 		/*
4219bafec742SSukumar Swaminathan 		 * Ideal number of vectors for the adapter is
4220bafec742SSukumar Swaminathan 		 * # rss rings + tx completion rings for default completion
4221bafec742SSukumar Swaminathan 		 * queue.
4222bafec742SSukumar Swaminathan 		 */
4223bafec742SSukumar Swaminathan 		request = qlge->rx_ring_count;
4224bafec742SSukumar Swaminathan 
4225bafec742SSukumar Swaminathan 		orig = request;
4226bafec742SSukumar Swaminathan 		if (request > (MAX_RX_RINGS))
4227bafec742SSukumar Swaminathan 			request = MAX_RX_RINGS;
4228bafec742SSukumar Swaminathan 		minimum = 2;
4229bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4230bafec742SSukumar Swaminathan 		break;
4231bafec742SSukumar Swaminathan 
4232bafec742SSukumar Swaminathan 	default:
4233bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4234bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4235bafec742SSukumar Swaminathan 	}
4236bafec742SSukumar Swaminathan 
4237bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
4238bafec742SSukumar Swaminathan 	    request, minimum));
4239bafec742SSukumar Swaminathan 
4240bafec742SSukumar Swaminathan 	/*
4241bafec742SSukumar Swaminathan 	 * Get number of supported interrupts
4242bafec742SSukumar Swaminathan 	 */
4243bafec742SSukumar Swaminathan 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4244bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4245bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4246bafec742SSukumar Swaminathan 		    "count: %d\n", rc, count));
4247bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4248bafec742SSukumar Swaminathan 	}
4249bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4250bafec742SSukumar Swaminathan 
4251bafec742SSukumar Swaminathan 	/*
4252bafec742SSukumar Swaminathan 	 * Get number of available interrupts
4253bafec742SSukumar Swaminathan 	 */
4254bafec742SSukumar Swaminathan 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4255bafec742SSukumar Swaminathan 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4256bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
4257bafec742SSukumar Swaminathan 		    ("Get interrupt available number failed. Return:"
4258bafec742SSukumar Swaminathan 		    " %d, available: %d\n", rc, avail));
4259bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4260bafec742SSukumar Swaminathan 	}
4261bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4262bafec742SSukumar Swaminathan 
4263bafec742SSukumar Swaminathan 	if (avail < request) {
4264bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4265bafec742SSukumar Swaminathan 		    request, avail));
4266bafec742SSukumar Swaminathan 		request = avail;
4267bafec742SSukumar Swaminathan 	}
4268bafec742SSukumar Swaminathan 
4269bafec742SSukumar Swaminathan 	actual = 0;
4270bafec742SSukumar Swaminathan 	qlge->intr_cnt = 0;
4271bafec742SSukumar Swaminathan 
4272bafec742SSukumar Swaminathan 	/*
4273bafec742SSukumar Swaminathan 	 * Allocate an array of interrupt handles
4274bafec742SSukumar Swaminathan 	 */
4275bafec742SSukumar Swaminathan 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4276bafec742SSukumar Swaminathan 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4277bafec742SSukumar Swaminathan 
4278bafec742SSukumar Swaminathan 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4279bafec742SSukumar Swaminathan 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4280bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4281bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4282bafec742SSukumar Swaminathan 		    " %d, request: %d, actual: %d",
4283bafec742SSukumar Swaminathan 		    __func__, qlge->instance, rc, request, actual);
4284bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4285bafec742SSukumar Swaminathan 	}
4286bafec742SSukumar Swaminathan 	qlge->intr_cnt = actual;
4287bafec742SSukumar Swaminathan 
4288bafec742SSukumar Swaminathan 	qlge->sequence |= INIT_INTR_ALLOC;
4289bafec742SSukumar Swaminathan 
4290bafec742SSukumar Swaminathan 	/*
4291bafec742SSukumar Swaminathan 	 * If the actual number of vectors is less than the minumum
4292bafec742SSukumar Swaminathan 	 * then fail.
4293bafec742SSukumar Swaminathan 	 */
4294bafec742SSukumar Swaminathan 	if (actual < minimum) {
4295bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
4296bafec742SSukumar Swaminathan 		    "Insufficient interrupt handles available: %d", actual);
4297bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4298bafec742SSukumar Swaminathan 	}
4299bafec742SSukumar Swaminathan 
4300bafec742SSukumar Swaminathan 	/*
4301bafec742SSukumar Swaminathan 	 * For MSI-X, actual might force us to reduce number of tx & rx rings
4302bafec742SSukumar Swaminathan 	 */
4303bafec742SSukumar Swaminathan 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4304accf27a5SSukumar Swaminathan 		if (actual >= (orig / 2)) {
4305accf27a5SSukumar Swaminathan 			count = orig / 2;
4306accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4307accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4308accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4309accf27a5SSukumar Swaminathan 		} else if (actual >= (orig / 4)) {
4310accf27a5SSukumar Swaminathan 			count = orig / 4;
4311accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4312accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4313accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4314accf27a5SSukumar Swaminathan 		} else if (actual >= (orig / 8)) {
4315accf27a5SSukumar Swaminathan 			count = orig / 8;
4316accf27a5SSukumar Swaminathan 			qlge->rss_ring_count = count;
4317accf27a5SSukumar Swaminathan 			qlge->tx_ring_count = count;
4318accf27a5SSukumar Swaminathan 			qlge->isr_stride = count;
4319accf27a5SSukumar Swaminathan 		} else if (actual < MAX_RX_RINGS) {
4320bafec742SSukumar Swaminathan 			qlge->tx_ring_count = 1;
4321bafec742SSukumar Swaminathan 			qlge->rss_ring_count = actual - 1;
4322bafec742SSukumar Swaminathan 		}
4323accf27a5SSukumar Swaminathan 		qlge->intr_cnt = count;
4324accf27a5SSukumar Swaminathan 		qlge->rx_ring_count = qlge->tx_ring_count +
4325accf27a5SSukumar Swaminathan 		    qlge->rss_ring_count;
4326bafec742SSukumar Swaminathan 	}
4327accf27a5SSukumar Swaminathan 	cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4328accf27a5SSukumar Swaminathan 	    qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4329accf27a5SSukumar Swaminathan 
4330bafec742SSukumar Swaminathan 	/*
4331bafec742SSukumar Swaminathan 	 * Get priority for first vector, assume remaining are all the same
4332bafec742SSukumar Swaminathan 	 */
4333bafec742SSukumar Swaminathan 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4334bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4335bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4336bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4337bafec742SSukumar Swaminathan 	}
4338bafec742SSukumar Swaminathan 
4339bafec742SSukumar Swaminathan 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4340bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
4341bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4342bafec742SSukumar Swaminathan 		goto ql_intr_alloc_fail;
4343bafec742SSukumar Swaminathan 	}
4344bafec742SSukumar Swaminathan 
4345bafec742SSukumar Swaminathan 	qlge->intr_type = intr_type;
4346bafec742SSukumar Swaminathan 
4347bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
4348bafec742SSukumar Swaminathan 
4349bafec742SSukumar Swaminathan ql_intr_alloc_fail:
4350bafec742SSukumar Swaminathan 	ql_free_irq_vectors(qlge);
4351bafec742SSukumar Swaminathan 
4352bafec742SSukumar Swaminathan 	return (DDI_FAILURE);
4353bafec742SSukumar Swaminathan }
4354bafec742SSukumar Swaminathan 
4355bafec742SSukumar Swaminathan /*
4356bafec742SSukumar Swaminathan  * Allocate interrupt vector(s) for one of the following interrupt types, MSI-X,
4357bafec742SSukumar Swaminathan  * MSI or Legacy. In MSI and Legacy modes we only support a single receive and
4358bafec742SSukumar Swaminathan  * transmit queue.
4359bafec742SSukumar Swaminathan  */
4360bafec742SSukumar Swaminathan int
ql_alloc_irqs(qlge_t * qlge)4361bafec742SSukumar Swaminathan ql_alloc_irqs(qlge_t *qlge)
4362bafec742SSukumar Swaminathan {
4363bafec742SSukumar Swaminathan 	int intr_types;
4364bafec742SSukumar Swaminathan 	int rval;
4365bafec742SSukumar Swaminathan 
4366bafec742SSukumar Swaminathan 	/*
4367bafec742SSukumar Swaminathan 	 * Get supported interrupt types
4368bafec742SSukumar Swaminathan 	 */
4369bafec742SSukumar Swaminathan 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4370bafec742SSukumar Swaminathan 	    != DDI_SUCCESS) {
4371bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4372bafec742SSukumar Swaminathan 		    __func__, qlge->instance);
4373bafec742SSukumar Swaminathan 
4374bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
4375bafec742SSukumar Swaminathan 	}
4376bafec742SSukumar Swaminathan 
4377bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4378bafec742SSukumar Swaminathan 	    __func__, qlge->instance, intr_types));
4379bafec742SSukumar Swaminathan 
4380bafec742SSukumar Swaminathan 	/* Install MSI-X interrupts */
4381bafec742SSukumar Swaminathan 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4382bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4383bafec742SSukumar Swaminathan 		    __func__, qlge->instance, intr_types));
4384bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4385bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
4386bafec742SSukumar Swaminathan 			return (rval);
4387bafec742SSukumar Swaminathan 		}
4388bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4389bafec742SSukumar Swaminathan 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
4390bafec742SSukumar Swaminathan 	}
4391bafec742SSukumar Swaminathan 
4392bafec742SSukumar Swaminathan 	/*
4393bafec742SSukumar Swaminathan 	 * We will have 2 completion queues in MSI / Legacy mode,
4394bafec742SSukumar Swaminathan 	 * Queue 0 for default completions
4395bafec742SSukumar Swaminathan 	 * Queue 1 for transmit completions
4396bafec742SSukumar Swaminathan 	 */
4397bafec742SSukumar Swaminathan 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4398bafec742SSukumar Swaminathan 	qlge->tx_ring_count = 1; /* Single tx completion queue */
4399bafec742SSukumar Swaminathan 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4400bafec742SSukumar Swaminathan 
4401bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4402bafec742SSukumar Swaminathan 	    __func__, qlge->instance));
4403bafec742SSukumar Swaminathan 	/*
4404bafec742SSukumar Swaminathan 	 * Add the h/w interrupt handler and initialise mutexes
4405bafec742SSukumar Swaminathan 	 */
4406bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
4407bafec742SSukumar Swaminathan 
4408bafec742SSukumar Swaminathan 	/*
4409bafec742SSukumar Swaminathan 	 * If OS supports MSIX interrupt but fails to allocate, then try
4410bafec742SSukumar Swaminathan 	 * MSI interrupt. If MSI interrupt allocation fails also, then roll
4411bafec742SSukumar Swaminathan 	 * back to fixed interrupt.
4412bafec742SSukumar Swaminathan 	 */
4413bafec742SSukumar Swaminathan 	if (intr_types & DDI_INTR_TYPE_MSI) {
4414bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4415bafec742SSukumar Swaminathan 		if (rval == DDI_SUCCESS) {
4416bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_MSI;
4417bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4418bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4419bafec742SSukumar Swaminathan 		}
4420bafec742SSukumar Swaminathan 	}
4421bafec742SSukumar Swaminathan 
4422bafec742SSukumar Swaminathan 	/* Try Fixed interrupt Legacy mode */
4423bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
4424bafec742SSukumar Swaminathan 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4425bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS) {
4426bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4427bafec742SSukumar Swaminathan 			    "allocation failed",
4428bafec742SSukumar Swaminathan 			    __func__, qlge->instance);
4429bafec742SSukumar Swaminathan 		} else {
4430bafec742SSukumar Swaminathan 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
4431bafec742SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4432bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
4433bafec742SSukumar Swaminathan 		}
4434bafec742SSukumar Swaminathan 	}
4435bafec742SSukumar Swaminathan 
4436bafec742SSukumar Swaminathan 	return (rval);
4437bafec742SSukumar Swaminathan }
4438bafec742SSukumar Swaminathan 
4439bafec742SSukumar Swaminathan static void
ql_free_rx_tx_locks(qlge_t * qlge)4440bafec742SSukumar Swaminathan ql_free_rx_tx_locks(qlge_t *qlge)
4441bafec742SSukumar Swaminathan {
4442bafec742SSukumar Swaminathan 	int i;
4443bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
4444bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
4445bafec742SSukumar Swaminathan 
4446bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
4447bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
4448bafec742SSukumar Swaminathan 		mutex_destroy(&tx_ring->tx_lock);
4449bafec742SSukumar Swaminathan 	}
4450bafec742SSukumar Swaminathan 
4451bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
4452bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
4453bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->rx_lock);
4454bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->sbq_lock);
4455bafec742SSukumar Swaminathan 		mutex_destroy(&rx_ring->lbq_lock);
4456bafec742SSukumar Swaminathan 	}
4457bafec742SSukumar Swaminathan }
4458bafec742SSukumar Swaminathan 
4459bafec742SSukumar Swaminathan /*
4460bafec742SSukumar Swaminathan  * Frees all resources allocated during attach.
4461bafec742SSukumar Swaminathan  *
4462bafec742SSukumar Swaminathan  * Input:
4463bafec742SSukumar Swaminathan  * dip = pointer to device information structure.
4464bafec742SSukumar Swaminathan  * sequence = bits indicating resources to free.
4465bafec742SSukumar Swaminathan  *
4466bafec742SSukumar Swaminathan  * Context:
4467bafec742SSukumar Swaminathan  * Kernel context.
4468bafec742SSukumar Swaminathan  */
4469bafec742SSukumar Swaminathan static void
ql_free_resources(qlge_t * qlge)4470accf27a5SSukumar Swaminathan ql_free_resources(qlge_t *qlge)
4471bafec742SSukumar Swaminathan {
4472bafec742SSukumar Swaminathan 
4473bafec742SSukumar Swaminathan 	/* Disable driver timer */
4474bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
4475bafec742SSukumar Swaminathan 
4476bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_REGISTERED) {
44770662fbf4SSukumar Swaminathan 		(void) mac_unregister(qlge->mh);
4478bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_REGISTERED;
4479bafec742SSukumar Swaminathan 	}
4480bafec742SSukumar Swaminathan 
4481bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MAC_ALLOC) {
4482bafec742SSukumar Swaminathan 		/* Nothing to do, macp is already freed */
4483bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MAC_ALLOC;
4484bafec742SSukumar Swaminathan 	}
4485bafec742SSukumar Swaminathan 
4486bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4487bafec742SSukumar Swaminathan 		pci_config_teardown(&qlge->pci_handle);
4488bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4489bafec742SSukumar Swaminathan 	}
4490bafec742SSukumar Swaminathan 
4491cddcb3daSSukumar Swaminathan 	if (qlge->sequence & INIT_INTR_ALLOC) {
4492bafec742SSukumar Swaminathan 		ql_free_irq_vectors(qlge);
4493bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4494bafec742SSukumar Swaminathan 	}
4495bafec742SSukumar Swaminathan 
4496bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4497bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4498bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4499bafec742SSukumar Swaminathan 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4500bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4501bafec742SSukumar Swaminathan 	}
4502bafec742SSukumar Swaminathan 
4503bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_KSTATS) {
4504bafec742SSukumar Swaminathan 		ql_fini_kstats(qlge);
4505bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_KSTATS;
4506bafec742SSukumar Swaminathan 	}
4507bafec742SSukumar Swaminathan 
4508bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MUTEX) {
4509bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->gen_mutex);
4510bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->hw_mutex);
4511bafec742SSukumar Swaminathan 		mutex_destroy(&qlge->mbx_mutex);
4512bafec742SSukumar Swaminathan 		cv_destroy(&qlge->cv_mbx_intr);
4513bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MUTEX;
4514bafec742SSukumar Swaminathan 	}
4515bafec742SSukumar Swaminathan 
4516bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4517bafec742SSukumar Swaminathan 		ql_free_rx_tx_locks(qlge);
4518bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4519bafec742SSukumar Swaminathan 	}
4520bafec742SSukumar Swaminathan 
4521bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4522bafec742SSukumar Swaminathan 		ql_free_mem_resources(qlge);
4523bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4524bafec742SSukumar Swaminathan 	}
4525bafec742SSukumar Swaminathan 
4526bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_REGS_SETUP) {
4527bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_handle);
4528bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_REGS_SETUP;
4529bafec742SSukumar Swaminathan 	}
4530bafec742SSukumar Swaminathan 
4531bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4532bafec742SSukumar Swaminathan 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4533bafec742SSukumar Swaminathan 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4534bafec742SSukumar Swaminathan 	}
4535bafec742SSukumar Swaminathan 
4536bafec742SSukumar Swaminathan 	/*
4537bafec742SSukumar Swaminathan 	 * free flash flt table that allocated in attach stage
4538bafec742SSukumar Swaminathan 	 */
4539bafec742SSukumar Swaminathan 	if ((qlge->flt.ql_flt_entry_ptr != NULL)&&
4540bafec742SSukumar Swaminathan 	    (qlge->flt.header.length != 0)) {
4541bafec742SSukumar Swaminathan 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4542bafec742SSukumar Swaminathan 		qlge->flt.ql_flt_entry_ptr = NULL;
4543bafec742SSukumar Swaminathan 	}
4544bafec742SSukumar Swaminathan 
4545accf27a5SSukumar Swaminathan 	if (qlge->sequence & INIT_FM) {
4546accf27a5SSukumar Swaminathan 		ql_fm_fini(qlge);
4547accf27a5SSukumar Swaminathan 		qlge->sequence &= ~INIT_FM;
4548accf27a5SSukumar Swaminathan 	}
4549accf27a5SSukumar Swaminathan 
4550accf27a5SSukumar Swaminathan 	ddi_prop_remove_all(qlge->dip);
4551accf27a5SSukumar Swaminathan 	ddi_set_driver_private(qlge->dip, NULL);
4552accf27a5SSukumar Swaminathan 
4553bafec742SSukumar Swaminathan 	/* finally, free qlge structure */
4554bafec742SSukumar Swaminathan 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4555bafec742SSukumar Swaminathan 		kmem_free(qlge, sizeof (qlge_t));
4556bafec742SSukumar Swaminathan 	}
4557bafec742SSukumar Swaminathan }
4558bafec742SSukumar Swaminathan 
4559bafec742SSukumar Swaminathan /*
4560bafec742SSukumar Swaminathan  * Set promiscuous mode of the driver
4561bafec742SSukumar Swaminathan  * Caller must catch HW_LOCK
4562bafec742SSukumar Swaminathan  */
4563bafec742SSukumar Swaminathan void
ql_set_promiscuous(qlge_t * qlge,int mode)4564bafec742SSukumar Swaminathan ql_set_promiscuous(qlge_t *qlge, int mode)
4565bafec742SSukumar Swaminathan {
4566bafec742SSukumar Swaminathan 	if (mode) {
45670662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4568bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 1);
4569bafec742SSukumar Swaminathan 	} else {
45700662fbf4SSukumar Swaminathan 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4571bafec742SSukumar Swaminathan 		    RT_IDX_VALID, 0);
4572bafec742SSukumar Swaminathan 	}
4573bafec742SSukumar Swaminathan }
4574bafec742SSukumar Swaminathan /*
4575bafec742SSukumar Swaminathan  * Write 'data1' to Mac Protocol Address Index Register and
4576bafec742SSukumar Swaminathan  * 'data2' to Mac Protocol Address Data Register
4577bafec742SSukumar Swaminathan  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4578bafec742SSukumar Swaminathan  */
4579bafec742SSukumar Swaminathan static int
ql_write_mac_proto_regs(qlge_t * qlge,uint32_t data1,uint32_t data2)4580bafec742SSukumar Swaminathan ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4581bafec742SSukumar Swaminathan {
4582bafec742SSukumar Swaminathan 	int return_value = DDI_SUCCESS;
4583bafec742SSukumar Swaminathan 
4584bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4585bafec742SSukumar Swaminathan 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4586bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4587bafec742SSukumar Swaminathan 		    "timeout.");
4588bafec742SSukumar Swaminathan 		return_value = DDI_FAILURE;
4589bafec742SSukumar Swaminathan 		goto out;
4590bafec742SSukumar Swaminathan 	}
4591bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4592bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4593bafec742SSukumar Swaminathan out:
4594bafec742SSukumar Swaminathan 	return (return_value);
4595bafec742SSukumar Swaminathan }
/*
 * Enable the 'index'ed multicast address in the host memory's multicast_list
 *
 * Programs both 32-bit halves of the MAC address stored at
 * qlge->multicast_list[index] into the chip's MAC-protocol address table
 * through the index/data register pair (ql_write_mac_proto_regs).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the semaphore cannot be taken
 * or a register write times out.
 */
int
ql_add_multicast_address(qlge_t *qlge, int index)
{
	int rtn_val = DDI_FAILURE;
	uint32_t offset;
	uint32_t value1, value2;

	/* Acquire the required semaphore */
	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
		return (rtn_val);
	}

	/* Program Offset0 - lower 32 bits of the MAC address */
	offset = 0;
	/* index selects the table entry (<<4); offset selects the word */
	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
	    (index << 4) | offset;
	/* pack address bytes 2..5 into the 32-bit data word */
	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
		goto out;

	/* Program offset1: upper 16 bits of the MAC address */
	offset = 1;
	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
	    (index<<4) | offset;
	/* pack address bytes 0..1 into the low 16 bits of the data word */
	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
		goto out;
	}
	rtn_val = DDI_SUCCESS;
out:
	/* Always release the semaphore taken above. */
	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
	return (rtn_val);
}
4636bafec742SSukumar Swaminathan 
/*
 * Disable the 'index'ed multicast address in the host memory's multicast_list
 *
 * Writes the table entry back WITHOUT the MAC_PROTOCOL_ADDRESS_ENABLE
 * bit (note value1 lacks it, unlike ql_add_multicast_address), and
 * zeroes the upper word, effectively disabling the entry.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on semaphore/register-write failure.
 */
int
ql_remove_multicast_address(qlge_t *qlge, int index)
{
	int rtn_val = DDI_FAILURE;
	uint32_t offset;
	uint32_t value1, value2;

	/* Acquire the required semaphore */
	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
		return (rtn_val);
	}
	/* Program Offset0 - lower 32 bits of the MAC address */
	offset = 0;
	/* no ENABLE bit: this write disables the entry */
	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
	value2 =
	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
		goto out;
	}
	/* Program offset1: upper 16 bits of the MAC address */
	offset = 1;
	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
	value2 = 0;	/* clear the upper-half data word */
	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
		goto out;
	}
	rtn_val = DDI_SUCCESS;
out:
	/* Always release the semaphore taken above. */
	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
	return (rtn_val);
}
4674bafec742SSukumar Swaminathan 
/*
 * Add a new multicast address to the list of supported list
 * This API is called after OS called gld_set_multicast (GLDv2)
 * or m_multicst (GLDv3)
 *
 * Restriction:
 * The number of maximum multicast address is limited by hardware.
 *
 * Returns 0 (DDI_SUCCESS) on success, EINVAL for a non-multicast
 * address, ENOENT if the software list is full or the routing-register
 * update fails.
 */
int
ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
{
	/* next free slot in the software multicast list */
	uint32_t index = qlge->multicast_list_count;
	int rval = DDI_SUCCESS;
	int status;

	/* the group (multicast) bit of the first address byte must be set */
	if ((ep[0] & 01) == 0) {
		rval = EINVAL;
		goto exit;
	}

	/* if there is an availabe space in multicast_list, then add it */
	if (index < MAX_MULTICAST_LIST_SIZE) {
		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
		    ETHERADDRL);
		/* increment the total number of addresses in multicast list */
		(void) ql_add_multicast_address(qlge, index);
		qlge->multicast_list_count++;
		QL_PRINT(DBG_GLD,
		    ("%s(%d): added to index of multicast list= 0x%x, "
		    "total %d\n", __func__, qlge->instance, index,
		    qlge->multicast_list_count));

		/*
		 * Past the hardware table capacity, switch to
		 * multicast-promiscuous routing instead.
		 * NOTE(review): this compares with '>' while the removal
		 * path uses '<=' against the decremented count; confirm
		 * whether '>=' was intended here (possible off-by-one).
		 */
		if (index > MAX_MULTICAST_HW_SIZE) {
			if (!qlge->multicast_promisc) {
				status = ql_set_routing_reg(qlge,
				    RT_IDX_ALLMULTI_SLOT,
				    RT_IDX_MCAST, 1);
				if (status) {
					cmn_err(CE_WARN,
					    "Failed to init routing reg "
					    "for mcast promisc mode.");
					rval = ENOENT;
					goto exit;
				}
				qlge->multicast_promisc = B_TRUE;
			}
		}
	} else {
		/* software list full */
		rval = ENOENT;
	}
exit:
	return (rval);
}
4728bafec742SSukumar Swaminathan 
/*
 * Remove an old multicast address from the list of supported multicast
 * addresses. This API is called after OS called gld_set_multicast (GLDv2)
 * or m_multicst (GLDv3)
 * The number of maximum multicast address is limited by hardware.
 *
 * Removal compacts the software list (shifting later entries down one
 * slot), reprograms the shifted hardware entries, and disables the now
 * stale last entry.  If the count drops back within hardware capacity,
 * multicast-promiscuous routing is turned off again.
 *
 * NOTE(review): always returns DDI_SUCCESS, even when the routing
 * register update fails (only a warning is logged) or when 'ep' is not
 * found in the list — presumably intentional best-effort behavior;
 * confirm against the GLD callers.
 */
int
ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
{
	uint32_t total = qlge->multicast_list_count;
	int i = 0;
	int rmv_index = 0;
	/* initial value is overwritten before use when a match is found */
	size_t length = sizeof (ql_multicast_addr);
	int status;

	for (i = 0; i < total; i++) {
		/* skip entries that do not match the address to remove */
		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
			continue;
		}

		rmv_index = i;
		/* block move the reset of other multicast address forward */
		length = ((total -1) -i) * sizeof (ql_multicast_addr);
		if (length > 0) {
			bcopy(&qlge->multicast_list[i+1],
			    &qlge->multicast_list[i], length);
		}
		qlge->multicast_list_count--;
		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
			/*
			 * there is a deletion in multicast list table,
			 * re-enable them
			 */
			for (i = rmv_index; i < qlge->multicast_list_count;
			    i++) {
				(void) ql_add_multicast_address(qlge, i);
			}
			/* and disable the last one */
			(void) ql_remove_multicast_address(qlge, i);

			/* disable multicast promiscuous mode */
			if (qlge->multicast_promisc) {
				status = ql_set_routing_reg(qlge,
				    RT_IDX_ALLMULTI_SLOT,
				    RT_IDX_MCAST, 0);
				if (status) {
					cmn_err(CE_WARN,
					    "Failed to init routing reg for "
					    "mcast promisc mode.");
					goto exit;
				}
				/* write to config register */
				qlge->multicast_promisc = B_FALSE;
			}
		}
		break;
	}
exit:
	return (DDI_SUCCESS);
}
4789bafec742SSukumar Swaminathan 
4790bafec742SSukumar Swaminathan /*
4791bafec742SSukumar Swaminathan  * Read a XGMAC register
4792bafec742SSukumar Swaminathan  */
4793bafec742SSukumar Swaminathan int
ql_read_xgmac_reg(qlge_t * qlge,uint32_t addr,uint32_t * val)4794bafec742SSukumar Swaminathan ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4795bafec742SSukumar Swaminathan {
4796bafec742SSukumar Swaminathan 	int rtn_val = DDI_FAILURE;
4797bafec742SSukumar Swaminathan 
4798bafec742SSukumar Swaminathan 	/* wait for XGMAC Address register RDY bit set */
4799bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4800bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4801bafec742SSukumar Swaminathan 		goto out;
4802bafec742SSukumar Swaminathan 	}
4803bafec742SSukumar Swaminathan 	/* start rx transaction */
4804bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4805bafec742SSukumar Swaminathan 
4806bafec742SSukumar Swaminathan 	/*
4807bafec742SSukumar Swaminathan 	 * wait for XGMAC Address register RDY bit set,
4808bafec742SSukumar Swaminathan 	 * which indicates data is ready
4809bafec742SSukumar Swaminathan 	 */
4810bafec742SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4811bafec742SSukumar Swaminathan 	    BIT_SET, 10) != DDI_SUCCESS) {
4812bafec742SSukumar Swaminathan 		goto out;
4813bafec742SSukumar Swaminathan 	}
4814bafec742SSukumar Swaminathan 	/* read data from XGAMC_DATA register */
4815bafec742SSukumar Swaminathan 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4816bafec742SSukumar Swaminathan 	rtn_val = DDI_SUCCESS;
4817bafec742SSukumar Swaminathan out:
4818bafec742SSukumar Swaminathan 	return (rtn_val);
4819bafec742SSukumar Swaminathan }
4820bafec742SSukumar Swaminathan 
/*
 * Implement checksum offload for IPv4 IP packets
 *
 * Parses the MAC and IP headers of the outbound frame at 'bp' and, guided
 * by the checksum flags in 'pflags' (HCK_IPV4_HDRCKSUM, HCK_FULLCKSUM),
 * programs the offload fields of the outbound MAC IOCB 'mac_iocb_ptr'
 * (opcode, flag bits, packed header offsets and header length) so the
 * chip can insert the IPv4 header checksum and/or the TCP/UDP checksum.
 * Frames that are not IPv4 leave the IOCB untouched.
 */
static void
ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
    struct ob_mac_iocb_req *mac_iocb_ptr)
{
	struct ip *iphdr = NULL;
	struct ether_header *ethhdr;
	struct ether_vlan_header *ethvhdr;
	struct tcphdr *tcp_hdr;
	uint32_t etherType;
	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;

	ethhdr  = (struct ether_header *)((void *)bp);
	ethvhdr = (struct ether_vlan_header *)((void *)bp);
	/* Is this vlan packet? (tagged frames carry a longer MAC header) */
	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
		mac_hdr_len = sizeof (struct ether_vlan_header);
		etherType = ntohs(ethvhdr->ether_type);
	} else {
		mac_hdr_len = sizeof (struct ether_header);
		etherType = ntohs(ethhdr->ether_type);
	}
	/* Is this IPv4 or IPv6 packet? */
	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
	    IPV4_VERSION) {
		if (etherType == ETHERTYPE_IP /* 0800 */) {
			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
		} else {
			/* EMPTY */
			QL_PRINT(DBG_TX,
			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
			    __func__, qlge->instance, etherType));
		}
	}
	/* ipV4 packets; iphdr remains NULL for anything else */
	if (iphdr != NULL) {

		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
		QL_PRINT(DBG_TX,
		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));

		ip_hdr_off = mac_hdr_len;
		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
		    __func__, qlge->instance, ip_hdr_len));

		/* tell the chip this is an IPv4 frame */
		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
		    OB_MAC_IOCB_REQ_IPv4);

		/* stack requested IPv4 header checksum insertion */
		if (pflags & HCK_IPV4_HDRCKSUM) {
			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
			    __func__, qlge->instance));
			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
			    OB_MAC_IOCB_REQ_IC);
			/* zero the field so the chip can fill it in */
			iphdr->ip_sum = 0;
			mac_iocb_ptr->hdr_off = (uint16_t)
			    cpu_to_le16(ip_hdr_off);
		}
		/* stack requested full (transport-layer) checksum */
		if (pflags & HCK_FULLCKSUM) {
			if (iphdr->ip_p == IPPROTO_TCP) {
				tcp_hdr =
				    (struct tcphdr *)(void *)
				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
				    __func__, qlge->instance));
				mac_iocb_ptr->opcode =
				    OPCODE_OB_MAC_OFFLOAD_IOCB;
				mac_iocb_ptr->flag1 =
				    (uint8_t)(mac_iocb_ptr->flag1 |
				    OB_MAC_IOCB_REQ_TC);
				mac_iocb_ptr->flag2 =
				    (uint8_t)(mac_iocb_ptr->flag2 |
				    OB_MAC_IOCB_REQ_IC);
				iphdr->ip_sum = 0;
				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
				/* th_off counts 32-bit words */
				tcp_udp_hdr_len = tcp_hdr->th_off*4;
				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
				    __func__, qlge->instance, tcp_udp_hdr_len));
				/*
				 * hdr_off packs the IP header offset in the
				 * low 6 bits and the transport header offset
				 * in the bits above it.
				 */
				hdr_off = ip_hdr_off;
				tcp_udp_hdr_off <<= 6;
				hdr_off |= tcp_udp_hdr_off;
				mac_iocb_ptr->hdr_off =
				    (uint16_t)cpu_to_le16(hdr_off);
				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
				    tcp_udp_hdr_len);

				/*
				 * if the chip is unable to do pseudo header
				 * cksum calculation, do it in then put the
				 * result to the data passed to the chip
				 */
				if (qlge->cfg_flags &
				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
					ql_pseudo_cksum((uint8_t *)iphdr);
				}
			} else if (iphdr->ip_p == IPPROTO_UDP) {
				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
				    __func__, qlge->instance));
				mac_iocb_ptr->opcode =
				    OPCODE_OB_MAC_OFFLOAD_IOCB;
				mac_iocb_ptr->flag1 =
				    (uint8_t)(mac_iocb_ptr->flag1 |
				    OB_MAC_IOCB_REQ_UC);
				mac_iocb_ptr->flag2 =
				    (uint8_t)(mac_iocb_ptr->flag2 |
				    OB_MAC_IOCB_REQ_IC);
				iphdr->ip_sum = 0;
				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
				/* UDP header is a fixed 8 bytes */
				tcp_udp_hdr_len = sizeof (struct udphdr);
				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
				    __func__, qlge->instance, tcp_udp_hdr_len));
				/* same offset packing as the TCP case above */
				hdr_off = ip_hdr_off;
				tcp_udp_hdr_off <<= 6;
				hdr_off |= tcp_udp_hdr_off;
				mac_iocb_ptr->hdr_off =
				    (uint16_t)cpu_to_le16(hdr_off);
				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
				    cpu_to_le16(mac_hdr_len + ip_hdr_len
				    + tcp_udp_hdr_len);

				/*
				 * if the chip is unable to calculate pseudo
				 * hdr cksum,do it in then put the result to
				 * the data passed to the chip
				 */
				if (qlge->cfg_flags &
				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
					ql_pseudo_cksum((uint8_t *)iphdr);
				}
			}
		}
	}
}
4959bafec742SSukumar Swaminathan 
4960bafec742SSukumar Swaminathan /*
4961bafec742SSukumar Swaminathan  * For TSO/LSO:
4962bafec742SSukumar Swaminathan  * MAC frame transmission with TCP large segment offload is performed in the
4963bafec742SSukumar Swaminathan  * same way as the MAC frame transmission with checksum offload with the
4964bafec742SSukumar Swaminathan  * exception that the maximum TCP segment size (MSS) must be specified to
4965bafec742SSukumar Swaminathan  * allow the chip to segment the data into legal sized frames.
4966bafec742SSukumar Swaminathan  * The host also needs to calculate a pseudo-header checksum over the
4967bafec742SSukumar Swaminathan  * following fields:
4968bafec742SSukumar Swaminathan  * Source IP Address, Destination IP Address, and the Protocol.
4969bafec742SSukumar Swaminathan  * The TCP length is not included in the pseudo-header calculation.
4970bafec742SSukumar Swaminathan  * The pseudo-header checksum is place in the TCP checksum field of the
4971bafec742SSukumar Swaminathan  * prototype header.
4972bafec742SSukumar Swaminathan  */
4973bafec742SSukumar Swaminathan static void
ql_lso_pseudo_cksum(uint8_t * buf)4974bafec742SSukumar Swaminathan ql_lso_pseudo_cksum(uint8_t *buf)
4975bafec742SSukumar Swaminathan {
4976bafec742SSukumar Swaminathan 	uint32_t cksum;
4977bafec742SSukumar Swaminathan 	uint16_t iphl;
4978bafec742SSukumar Swaminathan 	uint16_t proto;
4979bafec742SSukumar Swaminathan 
4980bafec742SSukumar Swaminathan 	/*
4981bafec742SSukumar Swaminathan 	 * Calculate the LSO pseudo-header checksum.
4982bafec742SSukumar Swaminathan 	 */
4983bafec742SSukumar Swaminathan 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4984bafec742SSukumar Swaminathan 	cksum = proto = buf[9];
4985bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4986bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4987bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4988bafec742SSukumar Swaminathan 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4989bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4990bafec742SSukumar Swaminathan 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4991bafec742SSukumar Swaminathan 
4992bafec742SSukumar Swaminathan 	/*
4993bafec742SSukumar Swaminathan 	 * Point it to the TCP/UDP header, and
4994bafec742SSukumar Swaminathan 	 * update the checksum field.
4995bafec742SSukumar Swaminathan 	 */
4996bafec742SSukumar Swaminathan 	buf += iphl + ((proto == IPPROTO_TCP) ?
4997bafec742SSukumar Swaminathan 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4998bafec742SSukumar Swaminathan 
4999bafec742SSukumar Swaminathan 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
5000bafec742SSukumar Swaminathan }
5001bafec742SSukumar Swaminathan 
/*
 * For IPv4 IP packets, distribute the tx packets evenly among tx rings
 */
typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
typedef	uint8_t		ub1;

#define	hashsize(n)	((ub4)1<<(n))
#define	hashmask(n)	(hashsize(n)-1)

#define	mix(a, b, c) \
{ \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12);  \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3);  \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}

/*
 * Bob Jenkins' lookup hash: hashes 'length' bytes of key 'k' into a
 * 32-bit value; 'initval' is the previous hash (or any arbitrary seed).
 */
ub4
hash(ub1 *k, ub4 length, ub4 initval)
{
	ub4 a = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
	ub4 b = 0x9e3779b9;
	ub4 c = initval;	/* the previous hash value */
	ub4 rem = length;
	ub4 i;

	/* consume the key twelve bytes at a time */
	while (rem >= 12) {
		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
		mix(a, b, c);
		k += 12;
		rem -= 12;
	}

	/* fold in the total length; the low byte of c is reserved for it */
	c += length;

	/*
	 * Fold the remaining (up to 11) tail bytes: bytes 0-3 go into a,
	 * bytes 4-7 into b, and bytes 8-10 into the upper three bytes of
	 * c.  This is equivalent to the fall-through switch in Jenkins'
	 * reference implementation.
	 */
	for (i = 0; i < rem; i++) {
		ub4 v = (ub4)k[i];

		if (i < 4)
			a += v << (8 * i);
		else if (i < 8)
			b += v << (8 * (i - 4));
		else
			c += v << (8 * (i - 7));
	}

	mix(a, b, c);
	/* report the result */
	return (c);
}
5077accf27a5SSukumar Swaminathan 
5078accf27a5SSukumar Swaminathan uint8_t
ql_tx_hashing(qlge_t * qlge,caddr_t bp)5079accf27a5SSukumar Swaminathan ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5080accf27a5SSukumar Swaminathan {
5081accf27a5SSukumar Swaminathan 	struct ip *iphdr = NULL;
5082accf27a5SSukumar Swaminathan 	struct ether_header *ethhdr;
5083accf27a5SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
5084accf27a5SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
5085accf27a5SSukumar Swaminathan 	struct udphdr *udp_hdr;
5086accf27a5SSukumar Swaminathan 	uint32_t etherType;
5087accf27a5SSukumar Swaminathan 	int mac_hdr_len, ip_hdr_len;
5088accf27a5SSukumar Swaminathan 	uint32_t h = 0; /* 0 by default */
5089accf27a5SSukumar Swaminathan 	uint8_t tx_ring_id = 0;
5090accf27a5SSukumar Swaminathan 	uint32_t ip_src_addr = 0;
5091accf27a5SSukumar Swaminathan 	uint32_t ip_desc_addr = 0;
5092accf27a5SSukumar Swaminathan 	uint16_t src_port = 0;
5093accf27a5SSukumar Swaminathan 	uint16_t dest_port = 0;
5094accf27a5SSukumar Swaminathan 	uint8_t key[12];
5095accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5096accf27a5SSukumar Swaminathan 
5097accf27a5SSukumar Swaminathan 	ethhdr = (struct ether_header *)((void *)bp);
5098accf27a5SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
5099accf27a5SSukumar Swaminathan 
5100accf27a5SSukumar Swaminathan 	if (qlge->tx_ring_count == 1)
5101accf27a5SSukumar Swaminathan 		return (tx_ring_id);
5102accf27a5SSukumar Swaminathan 
5103accf27a5SSukumar Swaminathan 	/* Is this vlan packet? */
5104accf27a5SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5105accf27a5SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
5106accf27a5SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
5107accf27a5SSukumar Swaminathan 	} else {
5108accf27a5SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
5109accf27a5SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
5110accf27a5SSukumar Swaminathan 	}
5111accf27a5SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
5112accf27a5SSukumar Swaminathan 	if (etherType == ETHERTYPE_IP /* 0800 */) {
5113accf27a5SSukumar Swaminathan 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5114accf27a5SSukumar Swaminathan 		    == IPV4_VERSION) {
5115accf27a5SSukumar Swaminathan 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5116accf27a5SSukumar Swaminathan 		}
5117accf27a5SSukumar Swaminathan 		if (((unsigned long)iphdr) & 0x3) {
5118accf27a5SSukumar Swaminathan 			/*  IP hdr not 4-byte aligned */
5119accf27a5SSukumar Swaminathan 			return (tx_ring_id);
5120accf27a5SSukumar Swaminathan 		}
5121accf27a5SSukumar Swaminathan 	}
5122accf27a5SSukumar Swaminathan 	/* ipV4 packets */
5123accf27a5SSukumar Swaminathan 	if (iphdr) {
5124accf27a5SSukumar Swaminathan 
5125accf27a5SSukumar Swaminathan 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5126accf27a5SSukumar Swaminathan 		ip_src_addr = iphdr->ip_src.s_addr;
5127accf27a5SSukumar Swaminathan 		ip_desc_addr = iphdr->ip_dst.s_addr;
5128accf27a5SSukumar Swaminathan 
5129accf27a5SSukumar Swaminathan 		if (iphdr->ip_p == IPPROTO_TCP) {
5130accf27a5SSukumar Swaminathan 			tcp_hdr = (struct tcphdr *)(void *)
5131accf27a5SSukumar Swaminathan 			    ((uint8_t *)iphdr + ip_hdr_len);
5132accf27a5SSukumar Swaminathan 			src_port = tcp_hdr->th_sport;
5133accf27a5SSukumar Swaminathan 			dest_port = tcp_hdr->th_dport;
5134accf27a5SSukumar Swaminathan 		} else if (iphdr->ip_p == IPPROTO_UDP) {
5135accf27a5SSukumar Swaminathan 			udp_hdr = (struct udphdr *)(void *)
5136accf27a5SSukumar Swaminathan 			    ((uint8_t *)iphdr + ip_hdr_len);
5137accf27a5SSukumar Swaminathan 			src_port = udp_hdr->uh_sport;
5138accf27a5SSukumar Swaminathan 			dest_port = udp_hdr->uh_dport;
5139accf27a5SSukumar Swaminathan 		}
5140accf27a5SSukumar Swaminathan 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
5141accf27a5SSukumar Swaminathan 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5142accf27a5SSukumar Swaminathan 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5143accf27a5SSukumar Swaminathan 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5144accf27a5SSukumar Swaminathan 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5145accf27a5SSukumar Swaminathan 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5146accf27a5SSukumar Swaminathan 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5147accf27a5SSukumar Swaminathan 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5148accf27a5SSukumar Swaminathan 		key[8] = (uint8_t)((src_port) &0xFF);
5149accf27a5SSukumar Swaminathan 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
5150accf27a5SSukumar Swaminathan 		key[10] = (uint8_t)((dest_port) &0xFF);
5151accf27a5SSukumar Swaminathan 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5152accf27a5SSukumar Swaminathan 		h = hash(key, 12, 0); /* return 32 bit */
5153accf27a5SSukumar Swaminathan 		tx_ring_id = (h & (qlge->tx_ring_count - 1));
5154accf27a5SSukumar Swaminathan 		if (tx_ring_id >= qlge->tx_ring_count) {
5155accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5156accf27a5SSukumar Swaminathan 			    __func__, tx_ring_id);
5157accf27a5SSukumar Swaminathan 			tx_ring_id = 0;
5158accf27a5SSukumar Swaminathan 		}
5159accf27a5SSukumar Swaminathan 	}
5160accf27a5SSukumar Swaminathan 	return (tx_ring_id);
5161accf27a5SSukumar Swaminathan }
5162accf27a5SSukumar Swaminathan 
5163bafec742SSukumar Swaminathan /*
5164bafec742SSukumar Swaminathan  * Tell the hardware to do Large Send Offload (LSO)
5165bafec742SSukumar Swaminathan  *
5166bafec742SSukumar Swaminathan  * Some fields in ob_mac_iocb need to be set so hardware can know what is
5167bafec742SSukumar Swaminathan  * the incoming packet, TCP or UDP, whether a VLAN tag needs to be inserted
5168bafec742SSukumar Swaminathan  * in the right place of the packet etc, thus, hardware can process the
5169bafec742SSukumar Swaminathan  * packet correctly.
5170bafec742SSukumar Swaminathan  */
5171bafec742SSukumar Swaminathan static void
ql_hw_lso_setup(qlge_t * qlge,uint32_t mss,caddr_t bp,struct ob_mac_iocb_req * mac_iocb_ptr)5172bafec742SSukumar Swaminathan ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5173bafec742SSukumar Swaminathan     struct ob_mac_iocb_req *mac_iocb_ptr)
5174bafec742SSukumar Swaminathan {
5175bafec742SSukumar Swaminathan 	struct ip *iphdr = NULL;
5176bafec742SSukumar Swaminathan 	struct ether_header *ethhdr;
5177bafec742SSukumar Swaminathan 	struct ether_vlan_header *ethvhdr;
5178bafec742SSukumar Swaminathan 	struct tcphdr *tcp_hdr;
5179bafec742SSukumar Swaminathan 	struct udphdr *udp_hdr;
5180bafec742SSukumar Swaminathan 	uint32_t etherType;
5181bafec742SSukumar Swaminathan 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5182bafec742SSukumar Swaminathan 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5183bafec742SSukumar Swaminathan 
5184bafec742SSukumar Swaminathan 	ethhdr = (struct ether_header *)(void *)bp;
5185bafec742SSukumar Swaminathan 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
5186bafec742SSukumar Swaminathan 
5187bafec742SSukumar Swaminathan 	/* Is this vlan packet? */
5188bafec742SSukumar Swaminathan 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5189bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_vlan_header);
5190bafec742SSukumar Swaminathan 		etherType = ntohs(ethvhdr->ether_type);
5191bafec742SSukumar Swaminathan 	} else {
5192bafec742SSukumar Swaminathan 		mac_hdr_len = sizeof (struct ether_header);
5193bafec742SSukumar Swaminathan 		etherType = ntohs(ethhdr->ether_type);
5194bafec742SSukumar Swaminathan 	}
5195bafec742SSukumar Swaminathan 	/* Is this IPv4 or IPv6 packet? */
5196bafec742SSukumar Swaminathan 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5197bafec742SSukumar Swaminathan 	    IPV4_VERSION) {
5198bafec742SSukumar Swaminathan 		if (etherType == ETHERTYPE_IP /* 0800 */) {
519995369d7bSToomas Soome 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5200bafec742SSukumar Swaminathan 		} else {
5201bafec742SSukumar Swaminathan 			/* EMPTY */
5202bafec742SSukumar Swaminathan 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5203bafec742SSukumar Swaminathan 			    " type 0x%x\n",
5204bafec742SSukumar Swaminathan 			    __func__, qlge->instance, etherType));
5205bafec742SSukumar Swaminathan 		}
5206bafec742SSukumar Swaminathan 	}
5207bafec742SSukumar Swaminathan 
5208bafec742SSukumar Swaminathan 	if (iphdr != NULL) { /* ipV4 packets */
5209bafec742SSukumar Swaminathan 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5210bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX,
5211bafec742SSukumar Swaminathan 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5212bafec742SSukumar Swaminathan 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
5213bafec742SSukumar Swaminathan 
5214bafec742SSukumar Swaminathan 		ip_hdr_off = mac_hdr_len;
5215bafec742SSukumar Swaminathan 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5216bafec742SSukumar Swaminathan 		    __func__, qlge->instance, ip_hdr_len));
5217bafec742SSukumar Swaminathan 
5218bafec742SSukumar Swaminathan 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5219bafec742SSukumar Swaminathan 		    OB_MAC_IOCB_REQ_IPv4);
5220bafec742SSukumar Swaminathan 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5221bafec742SSukumar Swaminathan 			if (iphdr->ip_p == IPPROTO_TCP) {
5222bafec742SSukumar Swaminathan 				tcp_hdr = (struct tcphdr *)(void *)
5223bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr +
5224bafec742SSukumar Swaminathan 				    ip_hdr_len);
5225bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5226bafec742SSukumar Swaminathan 				    "packet\n",
5227bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
5228bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
5229bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5230bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
5231bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
5232bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
5233bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
5234bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
5235bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5236bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
5237bafec742SSukumar Swaminathan 				    (uint16_t)(tcp_hdr->th_off*4);
5238bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5239bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
5240bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
5241bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
5242bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
5243bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
5244bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
5245bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5246bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5247bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
5248bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5249bafec742SSukumar Swaminathan 
5250bafec742SSukumar Swaminathan 				/*
5251bafec742SSukumar Swaminathan 				 * if the chip is unable to calculate pseudo
5252bafec742SSukumar Swaminathan 				 * header checksum, do it in then put the result
5253bafec742SSukumar Swaminathan 				 * to the data passed to the chip
5254bafec742SSukumar Swaminathan 				 */
5255bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
5256bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5257bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5258bafec742SSukumar Swaminathan 			} else if (iphdr->ip_p == IPPROTO_UDP) {
5259bafec742SSukumar Swaminathan 				udp_hdr = (struct udphdr *)(void *)
5260bafec742SSukumar Swaminathan 				    ((uint8_t *)(void *)iphdr
5261bafec742SSukumar Swaminathan 				    + ip_hdr_len);
5262bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5263bafec742SSukumar Swaminathan 				    "packet\n",
5264bafec742SSukumar Swaminathan 				    __func__, qlge->instance));
5265bafec742SSukumar Swaminathan 				mac_iocb_ptr->opcode =
5266bafec742SSukumar Swaminathan 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5267bafec742SSukumar Swaminathan 				mac_iocb_ptr->flag1 =
5268bafec742SSukumar Swaminathan 				    (uint8_t)(mac_iocb_ptr->flag1 |
5269bafec742SSukumar Swaminathan 				    OB_MAC_IOCB_REQ_LSO);
5270bafec742SSukumar Swaminathan 				iphdr->ip_sum = 0;
5271bafec742SSukumar Swaminathan 				tcp_udp_hdr_off =
5272bafec742SSukumar Swaminathan 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5273bafec742SSukumar Swaminathan 				tcp_udp_hdr_len =
5274bafec742SSukumar Swaminathan 				    (uint16_t)(udp_hdr->uh_ulen*4);
5275bafec742SSukumar Swaminathan 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5276bafec742SSukumar Swaminathan 				    __func__, qlge->instance, tcp_udp_hdr_len));
5277bafec742SSukumar Swaminathan 				hdr_off = ip_hdr_off;
5278bafec742SSukumar Swaminathan 				tcp_udp_hdr_off <<= 6;
5279bafec742SSukumar Swaminathan 				hdr_off |= tcp_udp_hdr_off;
5280bafec742SSukumar Swaminathan 				mac_iocb_ptr->hdr_off =
5281bafec742SSukumar Swaminathan 				    (uint16_t)cpu_to_le16(hdr_off);
5282bafec742SSukumar Swaminathan 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5283bafec742SSukumar Swaminathan 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5284bafec742SSukumar Swaminathan 				    tcp_udp_hdr_len);
5285bafec742SSukumar Swaminathan 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5286bafec742SSukumar Swaminathan 
5287bafec742SSukumar Swaminathan 				/*
5288bafec742SSukumar Swaminathan 				 * if the chip is unable to do pseudo header
5289bafec742SSukumar Swaminathan 				 * checksum calculation, do it here then put the
5290bafec742SSukumar Swaminathan 				 * result to the data passed to the chip
5291bafec742SSukumar Swaminathan 				 */
5292bafec742SSukumar Swaminathan 				if (qlge->cfg_flags &
5293bafec742SSukumar Swaminathan 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5294bafec742SSukumar Swaminathan 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5295bafec742SSukumar Swaminathan 			}
5296bafec742SSukumar Swaminathan 		}
5297bafec742SSukumar Swaminathan 	}
5298bafec742SSukumar Swaminathan }
5299bafec742SSukumar Swaminathan 
/*
 * Generic packet sending function which is used to send one packet.
 *
 * Transmits the mblk chain 'mp' on 'tx_ring'.  Small packets are copied
 * into the descriptor's preallocated bounce buffer (USE_COPY); larger or
 * highly fragmented packets are DMA-bound in place (USE_DMA).  LSO and
 * hardware checksum offload are programmed into the outbound IOCB when
 * the adapter supports them.
 *
 * Returns DDI_SUCCESS in every path (and 0 for the oversize case): once
 * called, this function consumes 'mp' — on any failure the message is
 * freed here and counted in the ring statistics, so the caller must not
 * retry with the same mblk.
 */
int
ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
{
	struct tx_ring_desc *tx_cb;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	mblk_t *tp;
	size_t msg_len = 0;
	size_t off;
	caddr_t bp;
	size_t nbyte, total_len;
	uint_t i = 0;
	int j = 0, frags = 0;
	uint32_t phy_addr_low, phy_addr_high;
	uint64_t phys_addr;
	clock_t now;
	uint32_t pflags = 0;
	uint32_t mss = 0;
	enum tx_mode_t tx_mode;
	struct oal_entry *oal_entry;
	int status;
	uint_t ncookies, oal_entries, max_oal_entries;
	size_t max_seg_len = 0;
	boolean_t use_lso = B_FALSE;
	struct oal_entry *tx_entry = NULL;
	struct oal_entry *last_oal_entry;
	qlge_t *qlge = tx_ring->qlge;
	ddi_dma_cookie_t dma_cookie;
	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
	int force_pullup = 0;

	tp = mp;
	total_len = msg_len = 0;
	/*
	 * Capacity of one request: TX_DESC_PER_IOCB entries live in the
	 * IOCB itself; when more are needed, the IOCB's last slot is
	 * converted into a pointer to the extended OAL list, hence the -1.
	 */
	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;

	/* Calculate number of data and segments in the incoming message */
	for (tp = mp; tp != NULL; tp = tp->b_cont) {
		nbyte = MBLKL(tp);
		total_len += nbyte;
		max_seg_len = max(nbyte, max_seg_len);
		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
		    "total length: %d\n", frags, nbyte));
		frags++;
	}

	/* drop packets too large even for LSO; the mblk is consumed */
	if (total_len >= QL_LSO_MAX) {
		freemsg(mp);
#ifdef QLGE_LOAD_UNLOAD
		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
		    __func__, (int)total_len);
#endif
		return (0);
	}

	/*
	 * Group bit (low bit of the first destination-address byte) set
	 * means broadcast or multicast; bump the matching tx counter.
	 */
	bp = (caddr_t)mp->b_rptr;
	if (bp[0] & 1) {
		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
		    ETHERADDRL) == 0) {
			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
			tx_ring->brdcstxmt++;
		} else {
			QL_PRINT(DBG_TX, ("multicast packet\n"));
			tx_ring->multixmt++;
		}
	}

	tx_ring->obytes += total_len;
	tx_ring->opackets ++;

	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
	    " max seg len: %d\n", total_len, frags, max_seg_len));

	/* claim a free slot in tx ring */
	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];

	/* get the tx descriptor */
	mac_iocb_ptr = tx_cb->queue_entry;

	/*
	 * Zero the first 20 bytes of the IOCB — presumably the fixed
	 * header fields ahead of the OAL array; the hard-coded size
	 * should match struct ob_mac_iocb_req (NOTE(review): verify).
	 */
	bzero((void *)mac_iocb_ptr, 20);

	ASSERT(tx_cb->mp == NULL);

	/*
	 * Decide to use DMA map or copy mode.
	 * DMA map mode must be used when the total msg length is more than the
	 * tx buffer length.
	 */

	if (total_len > tx_buf_len)
		tx_mode = USE_DMA;
	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
		tx_mode = USE_DMA;
	else
		tx_mode = USE_COPY;

	/* fetch the checksum/LSO metadata the stack attached to the mblk */
	if (qlge->chksum_cap) {
		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
		    "is 0x%x \n", pflags, qlge->chksum_cap));
		if (qlge->lso_enable) {
			uint32_t lso_flags = 0;
			mac_lso_get(mp, &mss, &lso_flags);
			use_lso = (lso_flags == HW_LSO);
		}
		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
		    mss, use_lso));
	}

do_pullup:

	/* concatenate all frags into one large packet if too fragmented */
	if (((tx_mode == USE_DMA)&&(frags > QL_MAX_TX_DMA_HANDLES)) ||
	    force_pullup) {
		mblk_t *mp1;
		if ((mp1 = msgpullup(mp, -1)) != NULL) {
			freemsg(mp);
			mp = mp1;
			frags = 1;
		} else {
			tx_ring->tx_fail_dma_bind++;
			goto bad;
		}
	}

	tx_cb->tx_bytes = (uint32_t)total_len;
	tx_cb->mp = mp;
	tx_cb->tx_dma_handle_used = 0;

	if (tx_mode == USE_DMA) {
		msg_len = total_len;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
		mac_iocb_ptr->tid = tx_ring->prod_idx;
		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
		mac_iocb_ptr->txq_idx = tx_ring->wq_id;

		tx_entry = &mac_iocb_ptr->oal_entry[0];
		/* oal_entry != NULL marks that we switched to the ext list */
		oal_entry = NULL;

		for (tp = mp, oal_entries = j = 0; tp != NULL;
		    tp = tp->b_cont) {
			/*
			 * if too many tx dma handles needed: retry once
			 * after pulling the chain into one mblk, else drop
			 */
			if (j >= QL_MAX_TX_DMA_HANDLES) {
				tx_ring->tx_no_dma_handle++;
				if (!force_pullup) {
					force_pullup = 1;
					goto do_pullup;
				} else {
					goto bad;
				}
			}
			nbyte = (uint16_t)MBLKL(tp);
			if (nbyte == 0)
				continue;

			/* bind this fragment for device read access */
			status = ddi_dma_addr_bind_handle(
			    tx_cb->tx_dma_handle[j], NULL,
			    (caddr_t)tp->b_rptr, nbyte,
			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
			    0, &dma_cookie, &ncookies);

			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
			    "length: %d, spans in %d cookies\n",
			    j, nbyte, ncookies));

			if (status != DDI_DMA_MAPPED) {
				goto bad;
			}
			/*
			 * Each fragment can span several cookies. One cookie
			 * will use one tx descriptor to transmit.
			 */
			for (i = ncookies; i > 0; i--, tx_entry++,
			    oal_entries++) {
				/*
				 * The number of TX descriptors that can be
				 *  saved in tx iocb and oal list is limited
				 * NOTE(review): '>' lets oal_entries reach
				 * max_oal_entries before the write below —
				 * looks like it should be '>='; confirm
				 * against the OAL sizing before changing.
				 */
				if (oal_entries > max_oal_entries) {
					tx_ring->tx_no_dma_cookie++;
					if (!force_pullup) {
						force_pullup = 1;
						goto do_pullup;
					} else {
						goto bad;
					}
				}

				if ((oal_entries == TX_DESC_PER_IOCB) &&
				    !oal_entry) {
					/*
					 * Time to switch to an oal list
					 * The last entry should be copied
					 * to first entry in the oal list
					 */
					oal_entry = tx_cb->oal;
					tx_entry =
					    &mac_iocb_ptr->oal_entry[
					    TX_DESC_PER_IOCB-1];
					bcopy(tx_entry, oal_entry,
					    sizeof (*oal_entry));

					/*
					 * last entry should be updated to
					 * point to the extended oal list itself
					 */
					tx_entry->buf_addr_low =
					    cpu_to_le32(
					    LS_64BITS(tx_cb->oal_dma_addr));
					tx_entry->buf_addr_high =
					    cpu_to_le32(
					    MS_64BITS(tx_cb->oal_dma_addr));
					/*
					 * Point tx_entry to the oal list
					 * second entry
					 */
					tx_entry = &oal_entry[1];
				}

				/* fill one descriptor from this cookie */
				tx_entry->buf_len =
				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
				phys_addr = dma_cookie.dmac_laddress;
				tx_entry->buf_addr_low =
				    cpu_to_le32(LS_64BITS(phys_addr));
				tx_entry->buf_addr_high =
				    cpu_to_le32(MS_64BITS(phys_addr));

				last_oal_entry = tx_entry;

				if (i > 1)
					ddi_dma_nextcookie(
					    tx_cb->tx_dma_handle[j],
					    &dma_cookie);
			}
			j++;
		}
		/*
		 * if OAL is used, the last oal entry in tx iocb indicates
		 * number of additional address/len pairs in OAL
		 */
		if (oal_entries > TX_DESC_PER_IOCB) {
			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
			tx_entry->buf_len = (uint32_t)
			    (cpu_to_le32((sizeof (struct oal_entry) *
			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
		}
		/* mark the final descriptor so the chip knows where to stop */
		last_oal_entry->buf_len = cpu_to_le32(
		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);

		tx_cb->tx_dma_handle_used = j;
		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
		    j, oal_entries));

		bp = (caddr_t)mp->b_rptr;
	}
	if (tx_mode == USE_COPY) {
		bp = tx_cb->copy_buffer;
		off = 0;
		nbyte = 0;
		frags = 0;
		/*
		 * Copy up to tx_buf_len of the transmit data
		 * from mp to tx buffer
		 */
		for (tp = mp; tp != NULL; tp = tp->b_cont) {
			nbyte = MBLKL(tp);
			if ((off + nbyte) <= tx_buf_len) {
				bcopy(tp->b_rptr, &bp[off], nbyte);
				off += nbyte;
				frags ++;
			}
		}

		msg_len = off;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
		mac_iocb_ptr->tid = tx_ring->prod_idx;
		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
		mac_iocb_ptr->txq_idx = tx_ring->wq_id;

		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
		    "from %d segaments\n", msg_len, frags));

		phys_addr = tx_cb->copy_buffer_dma_addr;
		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));

		QL_DUMP(DBG_TX, "\t requested sending data:\n",
		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);

		/* whole frame fits in one descriptor in copy mode */
		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;

		freemsg(mp); /* no need, we have copied */
		tx_cb->mp = NULL;
	} /* End of Copy Mode */

	/* Do TSO/LSO on TCP packet? */
	if (use_lso && mss) {
		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
	} else if (pflags & qlge->chksum_cap) {
		/* Do checksum offloading */
		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
	}

	/* let device know the latest outbound IOCB */
	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);

	if (tx_mode == USE_DMA) {
		/* let device know the latest outbound OAL if necessary */
		if (oal_entries > TX_DESC_PER_IOCB) {
			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
			    (off_t)0,
			    (sizeof (struct oal_entry) *
			    (oal_entries -TX_DESC_PER_IOCB+1)),
			    DDI_DMA_SYNC_FORDEV);
		}
	} else { /* for USE_COPY mode, tx buffer has changed */
		/* let device know the latest change */
		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
		/* copy buf offset */
		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
		    msg_len, DDI_DMA_SYNC_FORDEV);
	}

	/* save how the packet was sent */
	tx_cb->tx_type = tx_mode;

	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
	/* reduce the number of available tx slot */
	atomic_dec_32(&tx_ring->tx_free_count);

	/* advance the producer index, wrapping at the ring length */
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx >= tx_ring->wq_len)
		tx_ring->prod_idx = 0;

	now = ddi_get_lbolt();
	qlge->last_tx_time = now;

	return (DDI_SUCCESS);

bad:
	/*
	 * if for any reason driver can not send, delete
	 * the message pointer, mp
	 * Returns DDI_SUCCESS because the packet was consumed (dropped);
	 * partially bound DMA handles are released first.
	 */
	now = ddi_get_lbolt();
	freemsg(mp);
	mp = NULL;
	tx_cb->mp = NULL;
	for (i = 0; i < j; i++)
		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);

	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
	    __func__, qlge->instance, (int)now));

	return (DDI_SUCCESS);
}
5664bafec742SSukumar Swaminathan 
5665bafec742SSukumar Swaminathan 
5666bafec742SSukumar Swaminathan /*
5667bafec742SSukumar Swaminathan  * Initializes hardware and driver software flags before the driver
5668bafec742SSukumar Swaminathan  * is finally ready to work.
5669bafec742SSukumar Swaminathan  */
5670bafec742SSukumar Swaminathan int
ql_do_start(qlge_t * qlge)5671bafec742SSukumar Swaminathan ql_do_start(qlge_t *qlge)
5672bafec742SSukumar Swaminathan {
5673bafec742SSukumar Swaminathan 	int i;
5674bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5675bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
5676bafec742SSukumar Swaminathan 	int rings_done;
5677bafec742SSukumar Swaminathan 
5678bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5679bafec742SSukumar Swaminathan 
5680bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
5681bafec742SSukumar Swaminathan 
5682bafec742SSukumar Swaminathan 	/* Reset adapter */
56830662fbf4SSukumar Swaminathan 	(void) ql_asic_reset(qlge);
5684bafec742SSukumar Swaminathan 
5685bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
5686accf27a5SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5687bafec742SSukumar Swaminathan 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5688bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5689bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5690bafec742SSukumar Swaminathan 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5691bafec742SSukumar Swaminathan #endif
5692bafec742SSukumar Swaminathan 		/*
5693bafec742SSukumar Swaminathan 		 * Check if any ring has buffers still with upper layers
5694bafec742SSukumar Swaminathan 		 * If buffers are pending with upper layers, we use the
5695bafec742SSukumar Swaminathan 		 * existing buffers and don't reallocate new ones
5696bafec742SSukumar Swaminathan 		 * Unfortunately there is no way to evict buffers from
5697bafec742SSukumar Swaminathan 		 * upper layers. Using buffers with the current size may
5698bafec742SSukumar Swaminathan 		 * cause slightly sub-optimal performance, but that seems
5699bafec742SSukumar Swaminathan 		 * to be the easiest way to handle this situation.
5700bafec742SSukumar Swaminathan 		 */
5701bafec742SSukumar Swaminathan 		rings_done = 0;
5702bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
5703bafec742SSukumar Swaminathan 			rx_ring = &qlge->rx_ring[i];
5704bafec742SSukumar Swaminathan 			if (rx_ring->rx_indicate == 0)
5705bafec742SSukumar Swaminathan 				rings_done++;
5706bafec742SSukumar Swaminathan 			else
5707bafec742SSukumar Swaminathan 				break;
5708bafec742SSukumar Swaminathan 		}
5709bafec742SSukumar Swaminathan 		/*
5710bafec742SSukumar Swaminathan 		 * No buffers pending with upper layers;
5711bafec742SSukumar Swaminathan 		 * reallocte them for new MTU size
5712bafec742SSukumar Swaminathan 		 */
5713bafec742SSukumar Swaminathan 		if (rings_done >= qlge->rx_ring_count) {
5714bafec742SSukumar Swaminathan 			/* free large buffer pool */
5715bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5716bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5717bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
5718bafec742SSukumar Swaminathan 					ql_free_sbq_buffers(rx_ring);
5719bafec742SSukumar Swaminathan 					ql_free_lbq_buffers(rx_ring);
5720bafec742SSukumar Swaminathan 				}
5721bafec742SSukumar Swaminathan 			}
5722bafec742SSukumar Swaminathan 			/* reallocate large buffer pool */
5723bafec742SSukumar Swaminathan 			for (i = 0; i < qlge->rx_ring_count; i++) {
5724bafec742SSukumar Swaminathan 				rx_ring = &qlge->rx_ring[i];
5725bafec742SSukumar Swaminathan 				if (rx_ring->type != TX_Q) {
57260662fbf4SSukumar Swaminathan 					(void) ql_alloc_sbufs(qlge, rx_ring);
57270662fbf4SSukumar Swaminathan 					(void) ql_alloc_lbufs(qlge, rx_ring);
5728bafec742SSukumar Swaminathan 				}
5729bafec742SSukumar Swaminathan 			}
5730bafec742SSukumar Swaminathan 		}
5731bafec742SSukumar Swaminathan 	}
5732bafec742SSukumar Swaminathan 
5733bafec742SSukumar Swaminathan 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5734bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5735bafec742SSukumar Swaminathan 		mutex_exit(&qlge->hw_mutex);
5736accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
5737accf27a5SSukumar Swaminathan 			atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5738accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5739accf27a5SSukumar Swaminathan 		}
5740bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
5741bafec742SSukumar Swaminathan 	}
5742bafec742SSukumar Swaminathan 
5743bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
5744accf27a5SSukumar Swaminathan 	/* if adapter is up successfully but was bad before */
5745accf27a5SSukumar Swaminathan 	if (qlge->flags & ADAPTER_ERROR) {
5746accf27a5SSukumar Swaminathan 		atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5747accf27a5SSukumar Swaminathan 		if (qlge->fm_enable) {
5748accf27a5SSukumar Swaminathan 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5749accf27a5SSukumar Swaminathan 		}
5750accf27a5SSukumar Swaminathan 	}
5751bafec742SSukumar Swaminathan 
5752bafec742SSukumar Swaminathan 	/* Get current link state */
5753bafec742SSukumar Swaminathan 	qlge->port_link_state = ql_get_link_state(qlge);
5754bafec742SSukumar Swaminathan 
5755bafec742SSukumar Swaminathan 	if (qlge->port_link_state == LS_UP) {
5756bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5757bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5758bafec742SSukumar Swaminathan 		/* If driver detects a carrier on */
5759bafec742SSukumar Swaminathan 		CARRIER_ON(qlge);
5760bafec742SSukumar Swaminathan 	} else {
5761bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5762bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
5763bafec742SSukumar Swaminathan 		/* If driver detects a lack of carrier */
5764bafec742SSukumar Swaminathan 		CARRIER_OFF(qlge);
5765bafec742SSukumar Swaminathan 	}
5766bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STARTED;
5767bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
5768bafec742SSukumar Swaminathan }
5769bafec742SSukumar Swaminathan 
5770bafec742SSukumar Swaminathan /*
5771bafec742SSukumar Swaminathan  * Stop currently running driver
5772bafec742SSukumar Swaminathan  * Driver needs to stop routing new packets to driver and wait until
5773bafec742SSukumar Swaminathan  * all pending tx/rx buffers to be free-ed.
5774bafec742SSukumar Swaminathan  */
5775bafec742SSukumar Swaminathan int
ql_do_stop(qlge_t * qlge)5776bafec742SSukumar Swaminathan ql_do_stop(qlge_t *qlge)
5777bafec742SSukumar Swaminathan {
5778bafec742SSukumar Swaminathan 	int rc = DDI_FAILURE;
5779bafec742SSukumar Swaminathan 	uint32_t i, j, k;
5780bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc, *lbq_desc;
5781bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
5782bafec742SSukumar Swaminathan 
5783bafec742SSukumar Swaminathan 	ASSERT(qlge != NULL);
5784bafec742SSukumar Swaminathan 
5785bafec742SSukumar Swaminathan 	CARRIER_OFF(qlge);
5786bafec742SSukumar Swaminathan 
5787bafec742SSukumar Swaminathan 	rc = ql_bringdown_adapter(qlge);
5788bafec742SSukumar Swaminathan 	if (rc != DDI_SUCCESS) {
5789bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
5790bafec742SSukumar Swaminathan 	} else
5791bafec742SSukumar Swaminathan 		rc = DDI_SUCCESS;
5792bafec742SSukumar Swaminathan 
5793bafec742SSukumar Swaminathan 	for (k = 0; k < qlge->rx_ring_count; k++) {
5794bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[k];
5795bafec742SSukumar Swaminathan 		if (rx_ring->type != TX_Q) {
5796bafec742SSukumar Swaminathan 			j = rx_ring->lbq_use_head;
5797bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5798bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5799bafec742SSukumar Swaminathan 			    " to free list %d\n total %d\n",
5800bafec742SSukumar Swaminathan 			    k, rx_ring->lbuf_in_use_count,
5801bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count,
5802bafec742SSukumar Swaminathan 			    rx_ring->lbuf_in_use_count +
5803bafec742SSukumar Swaminathan 			    rx_ring->lbuf_free_count);
5804bafec742SSukumar Swaminathan #endif
5805bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5806bafec742SSukumar Swaminathan 				lbq_desc = rx_ring->lbuf_in_use[j];
5807bafec742SSukumar Swaminathan 				j++;
5808bafec742SSukumar Swaminathan 				if (j >= rx_ring->lbq_len) {
5809bafec742SSukumar Swaminathan 					j = 0;
5810bafec742SSukumar Swaminathan 				}
5811bafec742SSukumar Swaminathan 				if (lbq_desc->mp) {
5812bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5813bafec742SSukumar Swaminathan 					freemsg(lbq_desc->mp);
5814bafec742SSukumar Swaminathan 				}
5815bafec742SSukumar Swaminathan 			}
5816bafec742SSukumar Swaminathan 			rx_ring->lbq_use_head = j;
5817bafec742SSukumar Swaminathan 			rx_ring->lbq_use_tail = j;
5818bafec742SSukumar Swaminathan 			rx_ring->lbuf_in_use_count = 0;
5819bafec742SSukumar Swaminathan 			j = rx_ring->sbq_use_head;
5820bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
5821bafec742SSukumar Swaminathan 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5822bafec742SSukumar Swaminathan 			    " to free list %d\n total %d \n",
5823bafec742SSukumar Swaminathan 			    k, rx_ring->sbuf_in_use_count,
5824bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count,
5825bafec742SSukumar Swaminathan 			    rx_ring->sbuf_in_use_count +
5826bafec742SSukumar Swaminathan 			    rx_ring->sbuf_free_count);
5827bafec742SSukumar Swaminathan #endif
5828bafec742SSukumar Swaminathan 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5829bafec742SSukumar Swaminathan 				sbq_desc = rx_ring->sbuf_in_use[j];
5830bafec742SSukumar Swaminathan 				j++;
5831bafec742SSukumar Swaminathan 				if (j >= rx_ring->sbq_len) {
5832bafec742SSukumar Swaminathan 					j = 0;
5833bafec742SSukumar Swaminathan 				}
5834bafec742SSukumar Swaminathan 				if (sbq_desc->mp) {
5835bafec742SSukumar Swaminathan 					atomic_inc_32(&rx_ring->rx_indicate);
5836bafec742SSukumar Swaminathan 					freemsg(sbq_desc->mp);
5837bafec742SSukumar Swaminathan 				}
5838bafec742SSukumar Swaminathan 			}
5839bafec742SSukumar Swaminathan 			rx_ring->sbq_use_head = j;
5840bafec742SSukumar Swaminathan 			rx_ring->sbq_use_tail = j;
5841bafec742SSukumar Swaminathan 			rx_ring->sbuf_in_use_count = 0;
5842bafec742SSukumar Swaminathan 		}
5843bafec742SSukumar Swaminathan 	}
5844bafec742SSukumar Swaminathan 
5845bafec742SSukumar Swaminathan 	qlge->mac_flags = QL_MAC_STOPPED;
5846bafec742SSukumar Swaminathan 
5847bafec742SSukumar Swaminathan 	return (rc);
5848bafec742SSukumar Swaminathan }
5849bafec742SSukumar Swaminathan 
5850bafec742SSukumar Swaminathan /*
5851bafec742SSukumar Swaminathan  * Support
5852bafec742SSukumar Swaminathan  */
5853bafec742SSukumar Swaminathan 
/*
 * ql_disable_isr
 * Mask all hardware interrupts for this adapter and record in the
 * soft state that interrupts are off.
 */
void
ql_disable_isr(qlge_t *qlge)
{
	/*
	 * disable the hardware interrupt
	 */
	ISP_DISABLE_GLOBAL_INTRS(qlge);

	/* note in driver state that interrupts are no longer enabled */
	qlge->flags &= ~INTERRUPTS_ENABLED;
}
5864bafec742SSukumar Swaminathan 
5865bafec742SSukumar Swaminathan 
5866bafec742SSukumar Swaminathan 
/*
 * busy wait for 'usecs' microseconds.
 * drv_usecwait() spins rather than sleeps, so this is only suitable
 * for short delays.
 */
void
qlge_delay(clock_t usecs)
{
	drv_usecwait(usecs);
}
5875bafec742SSukumar Swaminathan 
/*
 * Return a pointer to the adapter's cached copy of its PCI
 * configuration space.
 * (The old comment said "retrieve firmware details", which did not
 * match what this function returns.)
 */
pci_cfg_t *
ql_get_pci_config(qlge_t *qlge)
{
	return (&(qlge->pci_cfg));
}
5885bafec742SSukumar Swaminathan 
5886bafec742SSukumar Swaminathan /*
5887bafec742SSukumar Swaminathan  * Get current Link status
5888bafec742SSukumar Swaminathan  */
5889bafec742SSukumar Swaminathan static uint32_t
ql_get_link_state(qlge_t * qlge)5890bafec742SSukumar Swaminathan ql_get_link_state(qlge_t *qlge)
5891bafec742SSukumar Swaminathan {
5892bafec742SSukumar Swaminathan 	uint32_t bitToCheck = 0;
5893bafec742SSukumar Swaminathan 	uint32_t temp, linkState;
5894bafec742SSukumar Swaminathan 
5895bafec742SSukumar Swaminathan 	if (qlge->func_number == qlge->fn0_net) {
5896bafec742SSukumar Swaminathan 		bitToCheck = STS_PL0;
5897bafec742SSukumar Swaminathan 	} else {
5898bafec742SSukumar Swaminathan 		bitToCheck = STS_PL1;
5899bafec742SSukumar Swaminathan 	}
5900bafec742SSukumar Swaminathan 	temp = ql_read_reg(qlge, REG_STATUS);
5901bafec742SSukumar Swaminathan 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5902bafec742SSukumar Swaminathan 	    __func__, qlge->instance, temp));
5903bafec742SSukumar Swaminathan 
5904bafec742SSukumar Swaminathan 	if (temp & bitToCheck) {
5905bafec742SSukumar Swaminathan 		linkState = LS_UP;
5906bafec742SSukumar Swaminathan 	} else {
5907bafec742SSukumar Swaminathan 		linkState = LS_DOWN;
5908bafec742SSukumar Swaminathan 	}
5909bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
5910bafec742SSukumar Swaminathan 		/* for Schultz, link Speed is fixed to 10G, full duplex */
5911bafec742SSukumar Swaminathan 		qlge->speed  = SPEED_10G;
5912bafec742SSukumar Swaminathan 		qlge->duplex = 1;
5913bafec742SSukumar Swaminathan 	}
5914bafec742SSukumar Swaminathan 	return (linkState);
5915bafec742SSukumar Swaminathan }
5916bafec742SSukumar Swaminathan /*
5917bafec742SSukumar Swaminathan  * Get current link status and report to OS
5918bafec742SSukumar Swaminathan  */
5919bafec742SSukumar Swaminathan static void
ql_get_and_report_link_state(qlge_t * qlge)5920bafec742SSukumar Swaminathan ql_get_and_report_link_state(qlge_t *qlge)
5921bafec742SSukumar Swaminathan {
5922bafec742SSukumar Swaminathan 	uint32_t cur_link_state;
5923bafec742SSukumar Swaminathan 
5924bafec742SSukumar Swaminathan 	/* Get current link state */
5925bafec742SSukumar Swaminathan 	cur_link_state = ql_get_link_state(qlge);
5926bafec742SSukumar Swaminathan 	/* if link state has changed */
5927bafec742SSukumar Swaminathan 	if (cur_link_state != qlge->port_link_state) {
5928bafec742SSukumar Swaminathan 
5929bafec742SSukumar Swaminathan 		qlge->port_link_state = cur_link_state;
5930bafec742SSukumar Swaminathan 
5931bafec742SSukumar Swaminathan 		if (qlge->port_link_state == LS_UP) {
5932bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5933bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5934bafec742SSukumar Swaminathan 			/* If driver detects a carrier on */
5935bafec742SSukumar Swaminathan 			CARRIER_ON(qlge);
5936bafec742SSukumar Swaminathan 		} else {
5937bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5938bafec742SSukumar Swaminathan 			    __func__, qlge->instance));
5939bafec742SSukumar Swaminathan 			/* If driver detects a lack of carrier */
5940bafec742SSukumar Swaminathan 			CARRIER_OFF(qlge);
5941bafec742SSukumar Swaminathan 		}
5942bafec742SSukumar Swaminathan 	}
5943bafec742SSukumar Swaminathan }
5944bafec742SSukumar Swaminathan 
5945bafec742SSukumar Swaminathan /*
5946bafec742SSukumar Swaminathan  * timer callback function executed after timer expires
5947bafec742SSukumar Swaminathan  */
5948bafec742SSukumar Swaminathan static void
ql_timer(void * arg)5949bafec742SSukumar Swaminathan ql_timer(void* arg)
5950bafec742SSukumar Swaminathan {
5951bafec742SSukumar Swaminathan 	ql_get_and_report_link_state((qlge_t *)arg);
5952bafec742SSukumar Swaminathan }
5953bafec742SSukumar Swaminathan 
5954bafec742SSukumar Swaminathan /*
5955bafec742SSukumar Swaminathan  * stop the running timer if activated
5956bafec742SSukumar Swaminathan  */
5957bafec742SSukumar Swaminathan static void
ql_stop_timer(qlge_t * qlge)5958bafec742SSukumar Swaminathan ql_stop_timer(qlge_t *qlge)
5959bafec742SSukumar Swaminathan {
5960bafec742SSukumar Swaminathan 	timeout_id_t timer_id;
5961bafec742SSukumar Swaminathan 	/* Disable driver timer */
5962bafec742SSukumar Swaminathan 	if (qlge->ql_timer_timeout_id != NULL) {
5963bafec742SSukumar Swaminathan 		timer_id = qlge->ql_timer_timeout_id;
5964bafec742SSukumar Swaminathan 		qlge->ql_timer_timeout_id = NULL;
5965bafec742SSukumar Swaminathan 		(void) untimeout(timer_id);
5966bafec742SSukumar Swaminathan 	}
5967bafec742SSukumar Swaminathan }
5968bafec742SSukumar Swaminathan 
5969bafec742SSukumar Swaminathan /*
5970bafec742SSukumar Swaminathan  * stop then restart timer
5971bafec742SSukumar Swaminathan  */
5972bafec742SSukumar Swaminathan void
ql_restart_timer(qlge_t * qlge)5973bafec742SSukumar Swaminathan ql_restart_timer(qlge_t *qlge)
5974bafec742SSukumar Swaminathan {
5975bafec742SSukumar Swaminathan 	ql_stop_timer(qlge);
5976bafec742SSukumar Swaminathan 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5977bafec742SSukumar Swaminathan 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5978bafec742SSukumar Swaminathan 	    (void *)qlge, qlge->ql_timer_ticks);
5979bafec742SSukumar Swaminathan }
5980bafec742SSukumar Swaminathan 
5981bafec742SSukumar Swaminathan /* ************************************************************************* */
5982bafec742SSukumar Swaminathan /*
5983bafec742SSukumar Swaminathan  *		Hardware K-Stats Data Structures and Subroutines
5984bafec742SSukumar Swaminathan  */
5985bafec742SSukumar Swaminathan /* ************************************************************************* */
/*
 * Name table for the "chip" kstat, a snapshot of the cached PCI config
 * space.  The leading index is informational only; the table ends at
 * the entry with a NULL name.  Order must match the assignments in
 * ql_kstats_get_pci_regs().
 */
static const ql_ksindex_t ql_kstats_hw[] = {
	/* PCI related hardware information */
	{ 0, "Vendor Id"			},
	{ 1, "Device Id"			},
	{ 2, "Command"				},
	{ 3, "Status"				},
	{ 4, "Revision Id"			},
	{ 5, "Cache Line Size"			},
	{ 6, "Latency Timer"			},
	{ 7, "Header Type"			},
	{ 9, "I/O base addr"			},
	{ 10, "Control Reg Base addr low"	},
	{ 11, "Control Reg Base addr high"	},
	{ 12, "Doorbell Reg Base addr low"	},
	{ 13, "Doorbell Reg Base addr high"	},
	{ 14, "Subsystem Vendor Id"		},
	{ 15, "Subsystem Device ID"		},
	{ 16, "PCIe Device Control"		},
	{ 17, "PCIe Link Status"		},

	{ -1,	NULL				},
};
6008bafec742SSukumar Swaminathan 
6009bafec742SSukumar Swaminathan /*
6010bafec742SSukumar Swaminathan  * kstat update function for PCI registers
6011bafec742SSukumar Swaminathan  */
6012bafec742SSukumar Swaminathan static int
ql_kstats_get_pci_regs(kstat_t * ksp,int flag)6013bafec742SSukumar Swaminathan ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
6014bafec742SSukumar Swaminathan {
6015bafec742SSukumar Swaminathan 	qlge_t *qlge;
6016bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6017bafec742SSukumar Swaminathan 
6018bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
6019bafec742SSukumar Swaminathan 		return (EACCES);
6020bafec742SSukumar Swaminathan 
6021bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
6022bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6023bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6024bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
6025bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.command;
6026bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.status;
6027bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
6028bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6029bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6030bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
6031bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6032bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6033bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6034bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6035bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6036bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6037bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6038bafec742SSukumar Swaminathan 	(knp++)->value.ui32 =
6039bafec742SSukumar Swaminathan 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6040bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6041bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6042bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6043bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
6044bafec742SSukumar Swaminathan 
6045bafec742SSukumar Swaminathan 	return (0);
6046bafec742SSukumar Swaminathan }
6047bafec742SSukumar Swaminathan 
/*
 * Name table for the "mii" kstat; terminated by the NULL-name entry.
 * Order must match the assignments in ql_kstats_mii_update().
 */
static const ql_ksindex_t ql_kstats_mii[] = {
	/* MAC/MII related hardware information */
	{ 0, "mtu"},

	{ -1, NULL},
};
6054bafec742SSukumar Swaminathan 
6055bafec742SSukumar Swaminathan 
6056bafec742SSukumar Swaminathan /*
6057bafec742SSukumar Swaminathan  * kstat update function for MII related information.
6058bafec742SSukumar Swaminathan  */
6059bafec742SSukumar Swaminathan static int
ql_kstats_mii_update(kstat_t * ksp,int flag)6060bafec742SSukumar Swaminathan ql_kstats_mii_update(kstat_t *ksp, int flag)
6061bafec742SSukumar Swaminathan {
6062bafec742SSukumar Swaminathan 	qlge_t *qlge;
6063bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6064bafec742SSukumar Swaminathan 
6065bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
6066bafec742SSukumar Swaminathan 		return (EACCES);
6067bafec742SSukumar Swaminathan 
6068bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
6069bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6070bafec742SSukumar Swaminathan 
6071bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->mtu;
6072bafec742SSukumar Swaminathan 
6073bafec742SSukumar Swaminathan 	return (0);
6074bafec742SSukumar Swaminathan }
6075bafec742SSukumar Swaminathan 
6076bafec742SSukumar Swaminathan static const ql_ksindex_t ql_kstats_reg[] = {
6077bafec742SSukumar Swaminathan 	/* Register information */
6078bafec742SSukumar Swaminathan 	{ 0, "System (0x08)"			},
6079bafec742SSukumar Swaminathan 	{ 1, "Reset/Fail Over(0x0Ch"		},
6080bafec742SSukumar Swaminathan 	{ 2, "Function Specific Control(0x10)"	},
6081bafec742SSukumar Swaminathan 	{ 3, "Status (0x30)"			},
6082bafec742SSukumar Swaminathan 	{ 4, "Intr Enable (0x34)"		},
6083bafec742SSukumar Swaminathan 	{ 5, "Intr Status1 (0x3C)"		},
6084bafec742SSukumar Swaminathan 	{ 6, "Error Status (0x54)"		},
6085bafec742SSukumar Swaminathan 	{ 7, "XGMAC Flow Control(0x11C)"	},
6086bafec742SSukumar Swaminathan 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
6087bafec742SSukumar Swaminathan 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
6088bafec742SSukumar Swaminathan 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
6089bafec742SSukumar Swaminathan 	{ 11, "interrupts actually allocated"	},
6090bafec742SSukumar Swaminathan 	{ 12, "interrupts on rx ring 0"		},
6091bafec742SSukumar Swaminathan 	{ 13, "interrupts on rx ring 1"		},
6092bafec742SSukumar Swaminathan 	{ 14, "interrupts on rx ring 2"		},
6093bafec742SSukumar Swaminathan 	{ 15, "interrupts on rx ring 3"		},
6094bafec742SSukumar Swaminathan 	{ 16, "interrupts on rx ring 4"		},
6095bafec742SSukumar Swaminathan 	{ 17, "interrupts on rx ring 5"		},
6096bafec742SSukumar Swaminathan 	{ 18, "interrupts on rx ring 6"		},
6097bafec742SSukumar Swaminathan 	{ 19, "interrupts on rx ring 7"		},
6098bafec742SSukumar Swaminathan 	{ 20, "polls on rx ring 0"		},
6099bafec742SSukumar Swaminathan 	{ 21, "polls on rx ring 1"		},
6100bafec742SSukumar Swaminathan 	{ 22, "polls on rx ring 2"		},
6101bafec742SSukumar Swaminathan 	{ 23, "polls on rx ring 3"		},
6102bafec742SSukumar Swaminathan 	{ 24, "polls on rx ring 4"		},
6103bafec742SSukumar Swaminathan 	{ 25, "polls on rx ring 5"		},
6104bafec742SSukumar Swaminathan 	{ 26, "polls on rx ring 6"		},
6105bafec742SSukumar Swaminathan 	{ 27, "polls on rx ring 7"		},
6106bafec742SSukumar Swaminathan 	{ 28, "tx no resource on ring 0"	},
6107bafec742SSukumar Swaminathan 	{ 29, "tx dma bind fail on ring 0"	},
6108bafec742SSukumar Swaminathan 	{ 30, "tx dma no handle on ring 0"	},
6109bafec742SSukumar Swaminathan 	{ 31, "tx dma no cookie on ring 0"	},
6110accf27a5SSukumar Swaminathan 	{ 32, "MPI firmware major version"	},
6111accf27a5SSukumar Swaminathan 	{ 33, "MPI firmware minor version"	},
6112accf27a5SSukumar Swaminathan 	{ 34, "MPI firmware sub version"	},
6113accf27a5SSukumar Swaminathan 	{ 35, "rx no resource"			},
6114bafec742SSukumar Swaminathan 
6115bafec742SSukumar Swaminathan 	{ -1, NULL},
6116bafec742SSukumar Swaminathan };
6117bafec742SSukumar Swaminathan 
6118bafec742SSukumar Swaminathan 
6119bafec742SSukumar Swaminathan /*
6120bafec742SSukumar Swaminathan  * kstat update function for device register set
6121bafec742SSukumar Swaminathan  */
6122bafec742SSukumar Swaminathan static int
ql_kstats_get_reg_and_dev_stats(kstat_t * ksp,int flag)6123bafec742SSukumar Swaminathan ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6124bafec742SSukumar Swaminathan {
6125bafec742SSukumar Swaminathan 	qlge_t *qlge;
6126bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6127bafec742SSukumar Swaminathan 	uint32_t val32;
6128bafec742SSukumar Swaminathan 	int i = 0;
6129bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
6130accf27a5SSukumar Swaminathan 	struct rx_ring *rx_ring;
6131bafec742SSukumar Swaminathan 
6132bafec742SSukumar Swaminathan 	if (flag != KSTAT_READ)
6133bafec742SSukumar Swaminathan 		return (EACCES);
6134bafec742SSukumar Swaminathan 
6135bafec742SSukumar Swaminathan 	qlge = ksp->ks_private;
6136bafec742SSukumar Swaminathan 	knp = ksp->ks_data;
6137bafec742SSukumar Swaminathan 
6138bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6139bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6140bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6141bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6142bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6143bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6144bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6145bafec742SSukumar Swaminathan 
6146bafec742SSukumar Swaminathan 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6147bafec742SSukumar Swaminathan 		return (0);
6148bafec742SSukumar Swaminathan 	}
61490662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6150bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6151bafec742SSukumar Swaminathan 
61520662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6153bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6154bafec742SSukumar Swaminathan 
61550662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6156bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6157bafec742SSukumar Swaminathan 
61580662fbf4SSukumar Swaminathan 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6159bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6160bafec742SSukumar Swaminathan 
6161bafec742SSukumar Swaminathan 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6162bafec742SSukumar Swaminathan 
6163bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->intr_cnt;
6164bafec742SSukumar Swaminathan 
6165bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
6166bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
6167bafec742SSukumar Swaminathan 	}
6168bafec742SSukumar Swaminathan 
6169bafec742SSukumar Swaminathan 	for (i = 0; i < 8; i++) {
6170bafec742SSukumar Swaminathan 		(knp++)->value.ui32 = qlge->rx_polls[i];
6171bafec742SSukumar Swaminathan 	}
6172bafec742SSukumar Swaminathan 
6173bafec742SSukumar Swaminathan 	tx_ring = &qlge->tx_ring[0];
6174bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->defer;
6175bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6176bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6177bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6178bafec742SSukumar Swaminathan 
6179bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
6180bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6181bafec742SSukumar Swaminathan 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6182bafec742SSukumar Swaminathan 
6183accf27a5SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6184accf27a5SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
6185accf27a5SSukumar Swaminathan 		val32 += rx_ring->rx_packets_dropped_no_buffer;
6186accf27a5SSukumar Swaminathan 	}
6187accf27a5SSukumar Swaminathan 	(knp++)->value.ui32 = val32;
6188accf27a5SSukumar Swaminathan 
6189bafec742SSukumar Swaminathan 	return (0);
6190bafec742SSukumar Swaminathan }
6191bafec742SSukumar Swaminathan 
6192bafec742SSukumar Swaminathan 
6193bafec742SSukumar Swaminathan static kstat_t *
ql_setup_named_kstat(qlge_t * qlge,int instance,char * name,const ql_ksindex_t * ksip,size_t size,int (* update)(kstat_t *,int))6194bafec742SSukumar Swaminathan ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6195bafec742SSukumar Swaminathan     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6196bafec742SSukumar Swaminathan {
6197bafec742SSukumar Swaminathan 	kstat_t *ksp;
6198bafec742SSukumar Swaminathan 	kstat_named_t *knp;
6199bafec742SSukumar Swaminathan 	char *np;
6200bafec742SSukumar Swaminathan 	int type;
6201bafec742SSukumar Swaminathan 
6202bafec742SSukumar Swaminathan 	size /= sizeof (ql_ksindex_t);
6203bafec742SSukumar Swaminathan 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6204bafec742SSukumar Swaminathan 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6205bafec742SSukumar Swaminathan 	if (ksp == NULL)
6206bafec742SSukumar Swaminathan 		return (NULL);
6207bafec742SSukumar Swaminathan 
6208bafec742SSukumar Swaminathan 	ksp->ks_private = qlge;
6209bafec742SSukumar Swaminathan 	ksp->ks_update = update;
6210bafec742SSukumar Swaminathan 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6211bafec742SSukumar Swaminathan 		switch (*np) {
6212bafec742SSukumar Swaminathan 		default:
6213bafec742SSukumar Swaminathan 			type = KSTAT_DATA_UINT32;
6214bafec742SSukumar Swaminathan 			break;
6215bafec742SSukumar Swaminathan 		case '&':
6216bafec742SSukumar Swaminathan 			np += 1;
6217bafec742SSukumar Swaminathan 			type = KSTAT_DATA_CHAR;
6218bafec742SSukumar Swaminathan 			break;
6219bafec742SSukumar Swaminathan 		}
6220bafec742SSukumar Swaminathan 		kstat_named_init(knp, np, (uint8_t)type);
6221bafec742SSukumar Swaminathan 	}
6222bafec742SSukumar Swaminathan 	kstat_install(ksp);
6223bafec742SSukumar Swaminathan 
6224bafec742SSukumar Swaminathan 	return (ksp);
6225bafec742SSukumar Swaminathan }
6226bafec742SSukumar Swaminathan 
6227bafec742SSukumar Swaminathan /*
6228bafec742SSukumar Swaminathan  * Setup various kstat
6229bafec742SSukumar Swaminathan  */
6230bafec742SSukumar Swaminathan int
ql_init_kstats(qlge_t * qlge)6231bafec742SSukumar Swaminathan ql_init_kstats(qlge_t *qlge)
6232bafec742SSukumar Swaminathan {
6233bafec742SSukumar Swaminathan 	/* Hardware KStats */
6234bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6235bafec742SSukumar Swaminathan 	    qlge->instance, "chip", ql_kstats_hw,
6236bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6237bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6238bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6239bafec742SSukumar Swaminathan 	}
6240bafec742SSukumar Swaminathan 
6241bafec742SSukumar Swaminathan 	/* MII KStats */
6242bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6243bafec742SSukumar Swaminathan 	    qlge->instance, "mii", ql_kstats_mii,
6244bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
6245bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6246bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6247bafec742SSukumar Swaminathan 	}
6248bafec742SSukumar Swaminathan 
6249bafec742SSukumar Swaminathan 	/* REG KStats */
6250bafec742SSukumar Swaminathan 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6251bafec742SSukumar Swaminathan 	    qlge->instance, "reg", ql_kstats_reg,
6252bafec742SSukumar Swaminathan 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6253bafec742SSukumar Swaminathan 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6254bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
6255bafec742SSukumar Swaminathan 	}
6256bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6257bafec742SSukumar Swaminathan }
6258bafec742SSukumar Swaminathan 
6259bafec742SSukumar Swaminathan /*
6260bafec742SSukumar Swaminathan  * delete all kstat
6261bafec742SSukumar Swaminathan  */
6262bafec742SSukumar Swaminathan void
ql_fini_kstats(qlge_t * qlge)6263bafec742SSukumar Swaminathan ql_fini_kstats(qlge_t *qlge)
6264bafec742SSukumar Swaminathan {
6265bafec742SSukumar Swaminathan 	int i;
6266bafec742SSukumar Swaminathan 
6267bafec742SSukumar Swaminathan 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
6268bafec742SSukumar Swaminathan 		if (qlge->ql_kstats[i] != NULL)
6269bafec742SSukumar Swaminathan 			kstat_delete(qlge->ql_kstats[i]);
6270bafec742SSukumar Swaminathan 	}
6271bafec742SSukumar Swaminathan }
6272bafec742SSukumar Swaminathan 
6273bafec742SSukumar Swaminathan /* ************************************************************************* */
6274bafec742SSukumar Swaminathan /*
6275bafec742SSukumar Swaminathan  *                                 kstat end
6276bafec742SSukumar Swaminathan  */
6277bafec742SSukumar Swaminathan /* ************************************************************************* */
6278bafec742SSukumar Swaminathan 
6279bafec742SSukumar Swaminathan /*
6280bafec742SSukumar Swaminathan  * Setup the parameters for receive and transmit rings including buffer sizes
6281bafec742SSukumar Swaminathan  * and completion queue sizes
6282bafec742SSukumar Swaminathan  */
6283bafec742SSukumar Swaminathan static int
ql_setup_rings(qlge_t * qlge)6284bafec742SSukumar Swaminathan ql_setup_rings(qlge_t *qlge)
6285bafec742SSukumar Swaminathan {
6286bafec742SSukumar Swaminathan 	uint8_t i;
6287bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
6288bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
6289bafec742SSukumar Swaminathan 	uint16_t lbq_buf_size;
6290bafec742SSukumar Swaminathan 
6291bafec742SSukumar Swaminathan 	lbq_buf_size = (uint16_t)
6292accf27a5SSukumar Swaminathan 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6293bafec742SSukumar Swaminathan 
6294bafec742SSukumar Swaminathan 	/*
6295bafec742SSukumar Swaminathan 	 * rx_ring[0] is always the default queue.
6296bafec742SSukumar Swaminathan 	 */
6297bafec742SSukumar Swaminathan 	/*
6298bafec742SSukumar Swaminathan 	 * qlge->rx_ring_count:
6299bafec742SSukumar Swaminathan 	 * Total number of rx_rings. This includes a number
6300bafec742SSukumar Swaminathan 	 * of outbound completion handler rx_rings, and a
6301bafec742SSukumar Swaminathan 	 * number of inbound completion handler rx_rings.
6302bafec742SSukumar Swaminathan 	 * rss is only enabled if we have more than 1 rx completion
6303bafec742SSukumar Swaminathan 	 * queue. If we have a single rx completion queue
6304bafec742SSukumar Swaminathan 	 * then all rx completions go to this queue and
6305bafec742SSukumar Swaminathan 	 * the last completion queue
6306bafec742SSukumar Swaminathan 	 */
6307bafec742SSukumar Swaminathan 
6308bafec742SSukumar Swaminathan 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6309bafec742SSukumar Swaminathan 
6310bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
6311bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
6312bafec742SSukumar Swaminathan 		bzero((void *)tx_ring, sizeof (*tx_ring));
6313bafec742SSukumar Swaminathan 		tx_ring->qlge = qlge;
6314bafec742SSukumar Swaminathan 		tx_ring->wq_id = i;
6315bafec742SSukumar Swaminathan 		tx_ring->wq_len = qlge->tx_ring_size;
6316bafec742SSukumar Swaminathan 		tx_ring->wq_size = (uint32_t)(
6317bafec742SSukumar Swaminathan 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6318bafec742SSukumar Swaminathan 
6319bafec742SSukumar Swaminathan 		/*
6320bafec742SSukumar Swaminathan 		 * The completion queue ID for the tx rings start
6321bafec742SSukumar Swaminathan 		 * immediately after the last rss completion queue.
6322bafec742SSukumar Swaminathan 		 */
6323bafec742SSukumar Swaminathan 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6324bafec742SSukumar Swaminathan 	}
6325bafec742SSukumar Swaminathan 
6326bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
6327bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
6328bafec742SSukumar Swaminathan 		bzero((void *)rx_ring, sizeof (*rx_ring));
6329bafec742SSukumar Swaminathan 		rx_ring->qlge = qlge;
6330bafec742SSukumar Swaminathan 		rx_ring->cq_id = i;
6331bafec742SSukumar Swaminathan 		if (i != 0)
6332bafec742SSukumar Swaminathan 			rx_ring->cpu = (i) % qlge->rx_ring_count;
6333bafec742SSukumar Swaminathan 		else
6334bafec742SSukumar Swaminathan 			rx_ring->cpu = 0;
6335bafec742SSukumar Swaminathan 
6336bafec742SSukumar Swaminathan 		if (i < qlge->rss_ring_count) {
6337bafec742SSukumar Swaminathan 			/*
6338bafec742SSukumar Swaminathan 			 * Inbound completions (RSS) queues
6339bafec742SSukumar Swaminathan 			 * Default queue is queue 0 which handles
6340bafec742SSukumar Swaminathan 			 * unicast plus bcast/mcast and async events.
6341bafec742SSukumar Swaminathan 			 * Other inbound queues handle unicast frames only.
6342bafec742SSukumar Swaminathan 			 */
6343bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->rx_ring_size;
6344bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
6345bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6346bafec742SSukumar Swaminathan 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6347bafec742SSukumar Swaminathan 			rx_ring->lbq_size = (uint32_t)
6348bafec742SSukumar Swaminathan 			    (rx_ring->lbq_len * sizeof (uint64_t));
6349bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = lbq_buf_size;
6350bafec742SSukumar Swaminathan 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6351bafec742SSukumar Swaminathan 			rx_ring->sbq_size = (uint32_t)
6352bafec742SSukumar Swaminathan 			    (rx_ring->sbq_len * sizeof (uint64_t));
6353bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6354bafec742SSukumar Swaminathan 			rx_ring->type = RX_Q;
6355bafec742SSukumar Swaminathan 
6356bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
6357bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating rss completion queue %d "
6358bafec742SSukumar Swaminathan 			    "on cpu %d\n", __func__, qlge->instance,
6359bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
6360bafec742SSukumar Swaminathan 		} else {
6361bafec742SSukumar Swaminathan 			/*
6362bafec742SSukumar Swaminathan 			 * Outbound queue handles outbound completions only
6363bafec742SSukumar Swaminathan 			 */
6364bafec742SSukumar Swaminathan 			/* outbound cq is same size as tx_ring it services. */
6365accf27a5SSukumar Swaminathan 			QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6366bafec742SSukumar Swaminathan 			rx_ring->cq_len = qlge->tx_ring_size;
6367bafec742SSukumar Swaminathan 			rx_ring->cq_size = (uint32_t)
6368bafec742SSukumar Swaminathan 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6369bafec742SSukumar Swaminathan 			rx_ring->lbq_len = 0;
6370bafec742SSukumar Swaminathan 			rx_ring->lbq_size = 0;
6371bafec742SSukumar Swaminathan 			rx_ring->lbq_buf_size = 0;
6372bafec742SSukumar Swaminathan 			rx_ring->sbq_len = 0;
6373bafec742SSukumar Swaminathan 			rx_ring->sbq_size = 0;
6374bafec742SSukumar Swaminathan 			rx_ring->sbq_buf_size = 0;
6375bafec742SSukumar Swaminathan 			rx_ring->type = TX_Q;
6376bafec742SSukumar Swaminathan 
6377bafec742SSukumar Swaminathan 			QL_PRINT(DBG_GLD,
6378bafec742SSukumar Swaminathan 			    ("%s(%d)Allocating TX completion queue %d on"
6379bafec742SSukumar Swaminathan 			    " cpu %d\n", __func__, qlge->instance,
6380bafec742SSukumar Swaminathan 			    rx_ring->cq_id, rx_ring->cpu));
6381bafec742SSukumar Swaminathan 		}
6382bafec742SSukumar Swaminathan 	}
6383bafec742SSukumar Swaminathan 
6384bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
6385bafec742SSukumar Swaminathan }
6386bafec742SSukumar Swaminathan 
6387bafec742SSukumar Swaminathan static int
ql_start_rx_ring(qlge_t * qlge,struct rx_ring * rx_ring)6388bafec742SSukumar Swaminathan ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6389bafec742SSukumar Swaminathan {
6390bafec742SSukumar Swaminathan 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6391bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6392bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6393bafec742SSukumar Swaminathan 	/* first shadow area is used by wqicb's host copy of consumer index */
6394bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
6395bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6396bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6397bafec742SSukumar Swaminathan 	    + sizeof (uint64_t);
6398bafec742SSukumar Swaminathan 	/* lrg/sml bufq pointers */
6399bafec742SSukumar Swaminathan 	uint8_t *buf_q_base_reg =
6400bafec742SSukumar Swaminathan 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6401bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6402bafec742SSukumar Swaminathan 	uint64_t buf_q_base_reg_dma =
6403bafec742SSukumar Swaminathan 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6404bafec742SSukumar Swaminathan 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6405bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
6406bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6407bafec742SSukumar Swaminathan 	int err = 0;
6408bafec742SSukumar Swaminathan 	uint16_t bq_len;
6409bafec742SSukumar Swaminathan 	uint64_t tmp;
6410bafec742SSukumar Swaminathan 	uint64_t *base_indirect_ptr;
6411bafec742SSukumar Swaminathan 	int page_entries;
6412bafec742SSukumar Swaminathan 
6413bafec742SSukumar Swaminathan 	/* Set up the shadow registers for this ring. */
6414bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg = shadow_reg;
6415bafec742SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6416accf27a5SSukumar Swaminathan 	rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6417accf27a5SSukumar Swaminathan 	    sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6418bafec742SSukumar Swaminathan 
6419bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6420bafec742SSukumar Swaminathan 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6421bafec742SSukumar Swaminathan 
6422bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6423bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6424bafec742SSukumar Swaminathan 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6425bafec742SSukumar Swaminathan 
6426bafec742SSukumar Swaminathan 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6427bafec742SSukumar Swaminathan 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6428bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6429bafec742SSukumar Swaminathan 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6430bafec742SSukumar Swaminathan 
6431bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x00 for consumer index register */
6432bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6433bafec742SSukumar Swaminathan 	rx_ring->cnsmr_idx = 0;
6434bafec742SSukumar Swaminathan 	*rx_ring->prod_idx_sh_reg = 0;
6435bafec742SSukumar Swaminathan 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6436bafec742SSukumar Swaminathan 
6437bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x04 for valid register */
6438bafec742SSukumar Swaminathan 	rx_ring->valid_db_reg = (uint32_t *)(void *)
6439bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6440bafec742SSukumar Swaminathan 
6441bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
6442bafec742SSukumar Swaminathan 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6443bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x18);
6444bafec742SSukumar Swaminathan 
6445bafec742SSukumar Swaminathan 	/* PCI doorbell mem area + 0x1c */
6446bafec742SSukumar Swaminathan 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6447bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
6448bafec742SSukumar Swaminathan 
6449bafec742SSukumar Swaminathan 	bzero((void *)cqicb, sizeof (*cqicb));
6450bafec742SSukumar Swaminathan 
6451bafec742SSukumar Swaminathan 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
6452bafec742SSukumar Swaminathan 
6453bafec742SSukumar Swaminathan 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6454bafec742SSukumar Swaminathan 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6455bafec742SSukumar Swaminathan 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6456bafec742SSukumar Swaminathan 
6457bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_lo =
6458bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6459bafec742SSukumar Swaminathan 	cqicb->cq_base_addr_hi =
6460bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6461bafec742SSukumar Swaminathan 
6462bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_lo =
6463bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6464bafec742SSukumar Swaminathan 	cqicb->prod_idx_addr_hi =
6465bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6466bafec742SSukumar Swaminathan 
6467bafec742SSukumar Swaminathan 	/*
6468bafec742SSukumar Swaminathan 	 * Set up the control block load flags.
6469bafec742SSukumar Swaminathan 	 */
6470bafec742SSukumar Swaminathan 	cqicb->flags = FLAGS_LC | /* Load queue base address */
6471bafec742SSukumar Swaminathan 	    FLAGS_LV | /* Load MSI-X vector */
6472bafec742SSukumar Swaminathan 	    FLAGS_LI;  /* Load irq delay values */
6473bafec742SSukumar Swaminathan 	if (rx_ring->lbq_len) {
6474bafec742SSukumar Swaminathan 		/* Load lbq values */
6475bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6476bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6477bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6478bafec742SSukumar Swaminathan 		page_entries = 0;
6479bafec742SSukumar Swaminathan 		do {
6480bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
6481bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
6482bafec742SSukumar Swaminathan 			base_indirect_ptr++;
6483bafec742SSukumar Swaminathan 			page_entries++;
6484bafec742SSukumar Swaminathan 		} while (page_entries < (int)(
6485bafec742SSukumar Swaminathan 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6486bafec742SSukumar Swaminathan 
6487bafec742SSukumar Swaminathan 		cqicb->lbq_addr_lo =
6488bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6489bafec742SSukumar Swaminathan 		cqicb->lbq_addr_hi =
6490bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6491bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6492bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6493bafec742SSukumar Swaminathan 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6494bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6495bafec742SSukumar Swaminathan 		    (uint16_t)rx_ring->lbq_len);
6496bafec742SSukumar Swaminathan 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6497bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx = 0;
6498bafec742SSukumar Swaminathan 		rx_ring->lbq_curr_idx = 0;
6499bafec742SSukumar Swaminathan 	}
6500bafec742SSukumar Swaminathan 	if (rx_ring->sbq_len) {
6501bafec742SSukumar Swaminathan 		/* Load sbq values */
6502bafec742SSukumar Swaminathan 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6503bafec742SSukumar Swaminathan 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6504bafec742SSukumar Swaminathan 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6505bafec742SSukumar Swaminathan 		page_entries = 0;
6506bafec742SSukumar Swaminathan 
6507bafec742SSukumar Swaminathan 		do {
6508bafec742SSukumar Swaminathan 			*base_indirect_ptr = cpu_to_le64(tmp);
6509bafec742SSukumar Swaminathan 			tmp += VM_PAGE_SIZE;
6510bafec742SSukumar Swaminathan 			base_indirect_ptr++;
6511bafec742SSukumar Swaminathan 			page_entries++;
6512bafec742SSukumar Swaminathan 		} while (page_entries < (uint32_t)
6513bafec742SSukumar Swaminathan 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6514bafec742SSukumar Swaminathan 
6515bafec742SSukumar Swaminathan 		cqicb->sbq_addr_lo =
6516bafec742SSukumar Swaminathan 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6517bafec742SSukumar Swaminathan 		cqicb->sbq_addr_hi =
6518bafec742SSukumar Swaminathan 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6519bafec742SSukumar Swaminathan 		cqicb->sbq_buf_size = (uint16_t)
6520bafec742SSukumar Swaminathan 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6521bafec742SSukumar Swaminathan 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6522bafec742SSukumar Swaminathan 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6523bafec742SSukumar Swaminathan 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6524bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx = 0;
6525bafec742SSukumar Swaminathan 		rx_ring->sbq_curr_idx = 0;
6526bafec742SSukumar Swaminathan 	}
6527bafec742SSukumar Swaminathan 	switch (rx_ring->type) {
6528bafec742SSukumar Swaminathan 	case TX_Q:
6529bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6530bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_coalesce_usecs);
6531bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6532bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
6533bafec742SSukumar Swaminathan 		break;
6534bafec742SSukumar Swaminathan 
6535bafec742SSukumar Swaminathan 	case DEFAULT_Q:
6536accf27a5SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6537accf27a5SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6538accf27a5SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6539accf27a5SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6540bafec742SSukumar Swaminathan 		break;
6541bafec742SSukumar Swaminathan 
6542bafec742SSukumar Swaminathan 	case RX_Q:
6543bafec742SSukumar Swaminathan 		/*
6544bafec742SSukumar Swaminathan 		 * Inbound completion handling rx_rings run in
6545bafec742SSukumar Swaminathan 		 * separate NAPI contexts.
6546bafec742SSukumar Swaminathan 		 */
6547bafec742SSukumar Swaminathan 		cqicb->irq_delay = (uint16_t)
6548bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6549bafec742SSukumar Swaminathan 		cqicb->pkt_delay = (uint16_t)
6550bafec742SSukumar Swaminathan 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6551bafec742SSukumar Swaminathan 		break;
6552bafec742SSukumar Swaminathan 	default:
6553bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6554bafec742SSukumar Swaminathan 		    rx_ring->type);
6555bafec742SSukumar Swaminathan 	}
6556bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6557bafec742SSukumar Swaminathan 	    rx_ring->cq_id));
6558bafec742SSukumar Swaminathan 	/* QL_DUMP_CQICB(qlge, cqicb); */
6559bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6560bafec742SSukumar Swaminathan 	    rx_ring->cq_id);
6561bafec742SSukumar Swaminathan 	if (err) {
6562bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load CQICB.");
6563bafec742SSukumar Swaminathan 		return (err);
6564bafec742SSukumar Swaminathan 	}
6565bafec742SSukumar Swaminathan 
6566bafec742SSukumar Swaminathan 	rx_ring->rx_packets_dropped_no_buffer = 0;
6567bafec742SSukumar Swaminathan 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6568bafec742SSukumar Swaminathan 	rx_ring->rx_failed_sbq_allocs = 0;
6569bafec742SSukumar Swaminathan 	rx_ring->rx_failed_lbq_allocs = 0;
6570bafec742SSukumar Swaminathan 	rx_ring->rx_packets = 0;
6571bafec742SSukumar Swaminathan 	rx_ring->rx_bytes = 0;
6572bafec742SSukumar Swaminathan 	rx_ring->frame_too_long = 0;
6573bafec742SSukumar Swaminathan 	rx_ring->frame_too_short = 0;
6574bafec742SSukumar Swaminathan 	rx_ring->fcs_err = 0;
6575bafec742SSukumar Swaminathan 
6576bafec742SSukumar Swaminathan 	return (err);
6577bafec742SSukumar Swaminathan }
6578bafec742SSukumar Swaminathan 
6579bafec742SSukumar Swaminathan /*
6580bafec742SSukumar Swaminathan  * start RSS
6581bafec742SSukumar Swaminathan  */
6582bafec742SSukumar Swaminathan static int
ql_start_rss(qlge_t * qlge)6583bafec742SSukumar Swaminathan ql_start_rss(qlge_t *qlge)
6584bafec742SSukumar Swaminathan {
6585bafec742SSukumar Swaminathan 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6586bafec742SSukumar Swaminathan 	int status = 0;
6587bafec742SSukumar Swaminathan 	int i;
6588bafec742SSukumar Swaminathan 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6589bafec742SSukumar Swaminathan 
6590bafec742SSukumar Swaminathan 	bzero((void *)ricb, sizeof (*ricb));
6591bafec742SSukumar Swaminathan 
6592bafec742SSukumar Swaminathan 	ricb->base_cq = RSS_L4K;
6593bafec742SSukumar Swaminathan 	ricb->flags =
6594bafec742SSukumar Swaminathan 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6595bafec742SSukumar Swaminathan 	    RSS_RT6);
6596bafec742SSukumar Swaminathan 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6597bafec742SSukumar Swaminathan 
6598bafec742SSukumar Swaminathan 	/*
6599bafec742SSukumar Swaminathan 	 * Fill out the Indirection Table.
6600bafec742SSukumar Swaminathan 	 */
6601bafec742SSukumar Swaminathan 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6602bafec742SSukumar Swaminathan 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
6603bafec742SSukumar Swaminathan 
6604bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6605bafec742SSukumar Swaminathan 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6606bafec742SSukumar Swaminathan 
6607bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6608bafec742SSukumar Swaminathan 
6609bafec742SSukumar Swaminathan 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6610bafec742SSukumar Swaminathan 	if (status) {
6611bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load RICB.");
6612bafec742SSukumar Swaminathan 		return (status);
6613bafec742SSukumar Swaminathan 	}
6614bafec742SSukumar Swaminathan 
6615bafec742SSukumar Swaminathan 	return (status);
6616bafec742SSukumar Swaminathan }
6617bafec742SSukumar Swaminathan 
6618bafec742SSukumar Swaminathan /*
6619bafec742SSukumar Swaminathan  * load a tx ring control block to hw and start this ring
6620bafec742SSukumar Swaminathan  */
6621bafec742SSukumar Swaminathan static int
ql_start_tx_ring(qlge_t * qlge,struct tx_ring * tx_ring)6622bafec742SSukumar Swaminathan ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6623bafec742SSukumar Swaminathan {
6624bafec742SSukumar Swaminathan 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6625bafec742SSukumar Swaminathan 	caddr_t doorbell_area =
6626bafec742SSukumar Swaminathan 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6627bafec742SSukumar Swaminathan 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6628bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6629bafec742SSukumar Swaminathan 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6630bafec742SSukumar Swaminathan 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6631bafec742SSukumar Swaminathan 	int err = 0;
6632bafec742SSukumar Swaminathan 
6633bafec742SSukumar Swaminathan 	/*
6634bafec742SSukumar Swaminathan 	 * Assign doorbell registers for this tx_ring.
6635bafec742SSukumar Swaminathan 	 */
6636bafec742SSukumar Swaminathan 
6637bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area for tx producer index */
6638bafec742SSukumar Swaminathan 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6639bafec742SSukumar Swaminathan 	tx_ring->prod_idx = 0;
6640bafec742SSukumar Swaminathan 	/* TX PCI doorbell mem area + 0x04 */
6641bafec742SSukumar Swaminathan 	tx_ring->valid_db_reg = (uint32_t *)(void *)
6642bafec742SSukumar Swaminathan 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6643bafec742SSukumar Swaminathan 
6644bafec742SSukumar Swaminathan 	/*
6645bafec742SSukumar Swaminathan 	 * Assign shadow registers for this tx_ring.
6646bafec742SSukumar Swaminathan 	 */
6647bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6648bafec742SSukumar Swaminathan 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6649bafec742SSukumar Swaminathan 	*tx_ring->cnsmr_idx_sh_reg = 0;
6650bafec742SSukumar Swaminathan 
6651bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6652bafec742SSukumar Swaminathan 	    " phys_addr 0x%lx\n",
6653bafec742SSukumar Swaminathan 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6654bafec742SSukumar Swaminathan 	    tx_ring->cnsmr_idx_sh_reg_dma));
6655bafec742SSukumar Swaminathan 
6656bafec742SSukumar Swaminathan 	wqicb->len =
6657bafec742SSukumar Swaminathan 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6658bafec742SSukumar Swaminathan 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6659bafec742SSukumar Swaminathan 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6660bafec742SSukumar Swaminathan 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6661bafec742SSukumar Swaminathan 	wqicb->rid = 0;
6662bafec742SSukumar Swaminathan 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6663bafec742SSukumar Swaminathan 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6664bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_lo =
6665bafec742SSukumar Swaminathan 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6666bafec742SSukumar Swaminathan 	wqicb->cnsmr_idx_addr_hi =
6667bafec742SSukumar Swaminathan 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6668bafec742SSukumar Swaminathan 
6669bafec742SSukumar Swaminathan 	ql_init_tx_ring(tx_ring);
6670bafec742SSukumar Swaminathan 	/* QL_DUMP_WQICB(qlge, wqicb); */
6671bafec742SSukumar Swaminathan 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6672bafec742SSukumar Swaminathan 	    tx_ring->wq_id);
6673bafec742SSukumar Swaminathan 
6674bafec742SSukumar Swaminathan 	if (err) {
6675bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Failed to load WQICB.");
6676bafec742SSukumar Swaminathan 		return (err);
6677bafec742SSukumar Swaminathan 	}
6678bafec742SSukumar Swaminathan 	return (err);
6679bafec742SSukumar Swaminathan }
6680bafec742SSukumar Swaminathan 
6681bafec742SSukumar Swaminathan /*
6682bafec742SSukumar Swaminathan  * Set up a MAC, multicast or VLAN address for the
6683bafec742SSukumar Swaminathan  * inbound frame matching.
6684bafec742SSukumar Swaminathan  */
6685bafec742SSukumar Swaminathan int
ql_set_mac_addr_reg(qlge_t * qlge,uint8_t * addr,uint32_t type,uint16_t index)6686bafec742SSukumar Swaminathan ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6687bafec742SSukumar Swaminathan     uint16_t index)
6688bafec742SSukumar Swaminathan {
6689bafec742SSukumar Swaminathan 	uint32_t offset = 0;
6690bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
6691bafec742SSukumar Swaminathan 
6692bafec742SSukumar Swaminathan 	switch (type) {
6693bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_MULTI_MAC:
6694bafec742SSukumar Swaminathan 	case MAC_ADDR_TYPE_CAM_MAC: {
6695bafec742SSukumar Swaminathan 		uint32_t cam_output;
6696bafec742SSukumar Swaminathan 		uint32_t upper = (addr[0] << 8) | addr[1];
6697bafec742SSukumar Swaminathan 		uint32_t lower =
6698bafec742SSukumar Swaminathan 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6699bafec742SSukumar Swaminathan 		    (addr[5]);
6700bafec742SSukumar Swaminathan 
6701bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6702bafec742SSukumar Swaminathan 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6703bafec742SSukumar Swaminathan 		    "MULTICAST" : "UNICAST"));
6704bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT,
6705bafec742SSukumar Swaminathan 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6706bafec742SSukumar Swaminathan 		    "the CAM.\n",
6707bafec742SSukumar Swaminathan 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6708bafec742SSukumar Swaminathan 		    addr[5], index));
6709bafec742SSukumar Swaminathan 
6710bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6711bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6712bafec742SSukumar Swaminathan 		if (status)
6713bafec742SSukumar Swaminathan 			goto exit;
6714bafec742SSukumar Swaminathan 		/* offset 0 - lower 32 bits of the MAC address */
6715bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6716bafec742SSukumar Swaminathan 		    (offset++) |
6717bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6718bafec742SSukumar Swaminathan 		    type);	/* type */
6719bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6720bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6721bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6722bafec742SSukumar Swaminathan 		if (status)
6723bafec742SSukumar Swaminathan 			goto exit;
6724bafec742SSukumar Swaminathan 		/* offset 1 - upper 16 bits of the MAC address */
6725bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6726bafec742SSukumar Swaminathan 		    (offset++) |
6727bafec742SSukumar Swaminathan 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6728bafec742SSukumar Swaminathan 		    type);	/* type */
6729bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6730bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge,
6731bafec742SSukumar Swaminathan 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6732bafec742SSukumar Swaminathan 		if (status)
6733bafec742SSukumar Swaminathan 			goto exit;
6734bafec742SSukumar Swaminathan 		/* offset 2 - CQ ID associated with this MAC address */
6735bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6736bafec742SSukumar Swaminathan 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6737bafec742SSukumar Swaminathan 		    type);	/* type */
6738bafec742SSukumar Swaminathan 		/*
6739bafec742SSukumar Swaminathan 		 * This field should also include the queue id
6740bafec742SSukumar Swaminathan 		 * and possibly the function id.  Right now we hardcode
6741bafec742SSukumar Swaminathan 		 * the route field to NIC core.
6742bafec742SSukumar Swaminathan 		 */
6743bafec742SSukumar Swaminathan 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6744bafec742SSukumar Swaminathan 			cam_output = (CAM_OUT_ROUTE_NIC |
6745bafec742SSukumar Swaminathan 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6746bafec742SSukumar Swaminathan 			    (0 <<
6747bafec742SSukumar Swaminathan 			    CAM_OUT_CQ_ID_SHIFT));
6748bafec742SSukumar Swaminathan 
6749bafec742SSukumar Swaminathan 			/* route to NIC core */
6750bafec742SSukumar Swaminathan 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6751bafec742SSukumar Swaminathan 			    cam_output);
6752bafec742SSukumar Swaminathan 			}
6753bafec742SSukumar Swaminathan 		break;
6754bafec742SSukumar Swaminathan 		}
6755bafec742SSukumar Swaminathan 	default:
6756bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
6757bafec742SSukumar Swaminathan 		    "Address type %d not yet supported.", type);
6758bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
6759bafec742SSukumar Swaminathan 	}
6760bafec742SSukumar Swaminathan exit:
6761bafec742SSukumar Swaminathan 	return (status);
6762bafec742SSukumar Swaminathan }
6763bafec742SSukumar Swaminathan 
6764bafec742SSukumar Swaminathan /*
6765bafec742SSukumar Swaminathan  * The NIC function for this chip has 16 routing indexes.  Each one can be used
6766bafec742SSukumar Swaminathan  * to route different frame types to various inbound queues.  We send broadcast
6767bafec742SSukumar Swaminathan  * multicast/error frames to the default queue for slow handling,
6768bafec742SSukumar Swaminathan  * and CAM hit/RSS frames to the fast handling queues.
6769bafec742SSukumar Swaminathan  */
6770bafec742SSukumar Swaminathan static int
ql_set_routing_reg(qlge_t * qlge,uint32_t index,uint32_t mask,int enable)6771bafec742SSukumar Swaminathan ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6772bafec742SSukumar Swaminathan {
6773bafec742SSukumar Swaminathan 	int status;
6774bafec742SSukumar Swaminathan 	uint32_t value = 0;
6775bafec742SSukumar Swaminathan 
6776bafec742SSukumar Swaminathan 	QL_PRINT(DBG_INIT,
6777bafec742SSukumar Swaminathan 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6778bafec742SSukumar Swaminathan 	    (enable ? "Adding" : "Removing"),
6779bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6780bafec742SSukumar Swaminathan 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6781bafec742SSukumar Swaminathan 	    ((index ==
6782bafec742SSukumar Swaminathan 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6783bafec742SSukumar Swaminathan 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6784bafec742SSukumar Swaminathan 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6785bafec742SSukumar Swaminathan 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6786bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6787bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6788bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6789bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6790bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6791bafec742SSukumar Swaminathan 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6792bafec742SSukumar Swaminathan 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6793bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6794bafec742SSukumar Swaminathan 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6795bafec742SSukumar Swaminathan 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6796bafec742SSukumar Swaminathan 	    (enable ? "to" : "from")));
6797bafec742SSukumar Swaminathan 
6798bafec742SSukumar Swaminathan 	switch (mask) {
6799bafec742SSukumar Swaminathan 	case RT_IDX_CAM_HIT:
6800bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q | /* dest */
6801bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ | /* type */
6802bafec742SSukumar Swaminathan 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6803bafec742SSukumar Swaminathan 		break;
6804bafec742SSukumar Swaminathan 
6805bafec742SSukumar Swaminathan 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6806bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6807bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6808bafec742SSukumar Swaminathan 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6809bafec742SSukumar Swaminathan 		break;
6810bafec742SSukumar Swaminathan 
6811bafec742SSukumar Swaminathan 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6812bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6813bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6814bafec742SSukumar Swaminathan 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6815bafec742SSukumar Swaminathan 		break;
6816bafec742SSukumar Swaminathan 
6817bafec742SSukumar Swaminathan 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6818bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6819bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6820bafec742SSukumar Swaminathan 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6821bafec742SSukumar Swaminathan 		break;
6822bafec742SSukumar Swaminathan 
6823bafec742SSukumar Swaminathan 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6824bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6825bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6826bafec742SSukumar Swaminathan 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6827bafec742SSukumar Swaminathan 		break;
6828bafec742SSukumar Swaminathan 
6829bafec742SSukumar Swaminathan 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6830bafec742SSukumar Swaminathan 		value = RT_IDX_DST_CAM_Q |	/* dest */
6831bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6832bafec742SSukumar Swaminathan 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6833bafec742SSukumar Swaminathan 		break;
6834bafec742SSukumar Swaminathan 
6835bafec742SSukumar Swaminathan 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6836bafec742SSukumar Swaminathan 		value = RT_IDX_DST_RSS |	/* dest */
6837bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6838bafec742SSukumar Swaminathan 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6839bafec742SSukumar Swaminathan 		break;
6840bafec742SSukumar Swaminathan 
6841bafec742SSukumar Swaminathan 	case 0:	/* Clear the E-bit on an entry. */
6842bafec742SSukumar Swaminathan 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6843bafec742SSukumar Swaminathan 		    RT_IDX_TYPE_NICQ |	/* type */
6844bafec742SSukumar Swaminathan 		    (index << RT_IDX_IDX_SHIFT); /* index */
6845bafec742SSukumar Swaminathan 		break;
6846bafec742SSukumar Swaminathan 
6847bafec742SSukumar Swaminathan 	default:
6848bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6849bafec742SSukumar Swaminathan 		    mask);
6850bafec742SSukumar Swaminathan 		status = -EPERM;
6851bafec742SSukumar Swaminathan 		goto exit;
6852bafec742SSukumar Swaminathan 	}
6853bafec742SSukumar Swaminathan 
6854bafec742SSukumar Swaminathan 	if (value != 0) {
6855bafec742SSukumar Swaminathan 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6856bafec742SSukumar Swaminathan 		if (status)
6857bafec742SSukumar Swaminathan 			goto exit;
6858bafec742SSukumar Swaminathan 		value |= (enable ? RT_IDX_E : 0);
6859bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6860bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6861bafec742SSukumar Swaminathan 	}
6862bafec742SSukumar Swaminathan 
6863bafec742SSukumar Swaminathan exit:
6864bafec742SSukumar Swaminathan 	return (status);
6865bafec742SSukumar Swaminathan }
6866bafec742SSukumar Swaminathan 
6867bafec742SSukumar Swaminathan /*
6868bafec742SSukumar Swaminathan  * Clear all the entries in the routing table.
6869bafec742SSukumar Swaminathan  * Caller must get semaphore in advance.
6870bafec742SSukumar Swaminathan  */
6871bafec742SSukumar Swaminathan 
6872bafec742SSukumar Swaminathan static int
ql_stop_routing(qlge_t * qlge)6873bafec742SSukumar Swaminathan ql_stop_routing(qlge_t *qlge)
6874bafec742SSukumar Swaminathan {
6875bafec742SSukumar Swaminathan 	int status = 0;
6876bafec742SSukumar Swaminathan 	int i;
6877bafec742SSukumar Swaminathan 	/* Clear all the entries in the routing table. */
6878bafec742SSukumar Swaminathan 	for (i = 0; i < 16; i++) {
6879bafec742SSukumar Swaminathan 		status = ql_set_routing_reg(qlge, i, 0, 0);
6880bafec742SSukumar Swaminathan 		if (status) {
6881bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "Stop routing failed. ");
6882bafec742SSukumar Swaminathan 		}
6883bafec742SSukumar Swaminathan 	}
6884bafec742SSukumar Swaminathan 	return (status);
6885bafec742SSukumar Swaminathan }
6886bafec742SSukumar Swaminathan 
/*
 * Initialize the frame-to-queue routing.
 *
 * Acquires the routing-index hardware semaphore, clears the whole
 * routing table, then installs entries for broadcast frames, RSS-matched
 * frames (only when more than one inbound queue exists), CAM-hit frames
 * and multicast-matched frames.  Returns 0 on success or the first
 * non-zero error from the underlying register operations; the semaphore
 * is always released before returning (except when it could not be
 * acquired in the first place).
 */
int
ql_route_initialize(qlge_t *qlge)
{
	int status = 0;

	/* Must hold the routing-index semaphore before touching the table. */
	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
	if (status != DDI_SUCCESS)
		return (status);

	/* Clear all the entries in the routing table. */
	status = ql_stop_routing(qlge);
	if (status) {
		goto exit;
	}
	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for broadcast packets.");
		goto exit;
	}
	/*
	 * If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qlge->rss_ring_count > 1) {
		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
		    RT_IDX_RSS_MATCH, 1);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to init routing register for MATCH RSS "
			    "packets.");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
	    RT_IDX_CAM_HIT, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for CAM packets.");
		goto exit;
	}

	/*
	 * Last step: a failure here simply falls through to the unlock
	 * below, so no goto is needed.
	 */
	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
	    RT_IDX_MCAST_MATCH, 1);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init routing register for Multicast "
		    "packets.");
	}

exit:
	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
	return (status);
}
6943bafec742SSukumar Swaminathan 
/*
 * Initialize hardware.
 *
 * Programs the core chip registers (error halting, default queue, MPI
 * interrupt, function-specific control and optional header splitting),
 * reconciles the MPI port configuration (frame size, pause mode, DCBX)
 * with the driver's settings, then starts all rx rings, RSS (if more
 * than one inbound queue), all tx rings, and finally the frame routing
 * table.  Returns 0/DDI_SUCCESS on success, or the first failing
 * sub-step's status.
 */
static int
ql_device_initialize(qlge_t *qlge)
{
	uint32_t value, mask;
	int i;
	int status = 0;
	uint16_t pause = PAUSE_MODE_DISABLED;
	boolean_t update_port_config = B_FALSE;
	uint32_t pause_bit_mask;
	boolean_t dcbx_enable = B_FALSE;
	uint32_t dcbx_bit_mask = 0x10;	/* DCBX enable bit in link_cfg */
	/*
	 * Set up the System register to halt on errors.
	 * The high 16 bits of these registers act as a write mask:
	 * only bits whose mask bit is set are modified by the write.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write_reg(qlge, REG_SYSTEM, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;

	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);

	/* Enable the MPI interrupt. */
	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
	    | INTR_MASK_PI);
	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
	/* Set/clear header splitting. */
	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
		value |= FSC_SH;
		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
	}
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
	/*
	 * check current port max frame size, if different from OS setting,
	 * then we need to change
	 */
	qlge->max_frame_size =
	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;

	/* Port config is fetched via the MPI mailbox; serialize access. */
	mutex_enter(&qlge->mbx_mutex);
	status = ql_get_port_cfg(qlge);
	mutex_exit(&qlge->mbx_mutex);

	if (status == DDI_SUCCESS) {
		/* if current frame size is smaller than required size */
		if (qlge->port_cfg_info.max_frame_size <
		    qlge->max_frame_size) {
			QL_PRINT(DBG_MBX,
			    ("update frame size, current %d, new %d\n",
			    qlge->port_cfg_info.max_frame_size,
			    qlge->max_frame_size));
			qlge->port_cfg_info.max_frame_size =
			    qlge->max_frame_size;
			qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
			update_port_config = B_TRUE;
		}

		/* Determine the pause mode currently set in the port. */
		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
			pause = PAUSE_MODE_STANDARD;
		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
			pause = PAUSE_MODE_PER_PRIORITY;

		/* Rewrite the pause bits if the driver setting differs. */
		if (pause != qlge->pause) {
			pause_bit_mask = 0x60;	/* bit 5-6 */
			/* clear pause bits */
			qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
			if (qlge->pause == PAUSE_MODE_STANDARD)
				qlge->port_cfg_info.link_cfg |= STD_PAUSE;
			else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
				qlge->port_cfg_info.link_cfg |= PP_PAUSE;
			update_port_config = B_TRUE;
		}

		/* Reconcile the DCBX enable bit with the driver setting. */
		if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
			dcbx_enable = B_TRUE;
		if (dcbx_enable != qlge->dcbx_enable) {
			qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
			if (qlge->dcbx_enable)
				qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
		}

		/*
		 * NOTE(review): this unconditional assignment makes the
		 * conditional update_port_config tracking above dead and
		 * forces an MPI port-config update on every init.  It looks
		 * as if it was meant to sit inside the DCBX block above
		 * (which never sets the flag) -- confirm intent before
		 * changing, since removing it would skip the mailbox write
		 * when nothing changed.
		 */
		update_port_config = B_TRUE;

		/* if need to update port configuration */
		if (update_port_config) {
			mutex_enter(&qlge->mbx_mutex);
			(void) ql_set_mpi_port_config(qlge,
			    qlge->port_cfg_info);
			mutex_exit(&qlge->mbx_mutex);
		}
	} else
		cmn_err(CE_WARN, "ql_get_port_cfg failed");

	/* Start up the rx queues. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start rx ring[%d]", i);
			return (status);
		}
	}

	/*
	 * If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qlge->rss_ring_count > 1) {
		status = ql_start_rss(qlge);
		if (status) {
			cmn_err(CE_WARN, "Failed to start RSS.");
			return (status);
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
		if (status) {
			cmn_err(CE_WARN,
			    "Failed to start tx ring[%d]", i);
			return (status);
		}
	}
	qlge->selected_tx_ring = 0;
	/* Set the frame routing filter. */
	status = ql_route_initialize(qlge);
	if (status) {
		cmn_err(CE_WARN,
		    "Failed to init CAM/Routing tables.");
		return (status);
	}

	return (status);
}
7088bafec742SSukumar Swaminathan /*
7089bafec742SSukumar Swaminathan  * Issue soft reset to chip.
7090bafec742SSukumar Swaminathan  */
7091bafec742SSukumar Swaminathan static int
ql_asic_reset(qlge_t * qlge)7092bafec742SSukumar Swaminathan ql_asic_reset(qlge_t *qlge)
7093bafec742SSukumar Swaminathan {
7094bafec742SSukumar Swaminathan 	int status = DDI_SUCCESS;
7095bafec742SSukumar Swaminathan 
7096bafec742SSukumar Swaminathan 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7097bafec742SSukumar Swaminathan 	    |FUNCTION_RESET);
7098bafec742SSukumar Swaminathan 
7099accf27a5SSukumar Swaminathan 	if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7100accf27a5SSukumar Swaminathan 	    BIT_RESET, 0) != DDI_SUCCESS) {
7101bafec742SSukumar Swaminathan 		cmn_err(CE_WARN,
7102bafec742SSukumar Swaminathan 		    "TIMEOUT!!! errored out of resetting the chip!");
7103bafec742SSukumar Swaminathan 		status = DDI_FAILURE;
7104bafec742SSukumar Swaminathan 	}
7105bafec742SSukumar Swaminathan 
7106bafec742SSukumar Swaminathan 	return (status);
7107bafec742SSukumar Swaminathan }
7108bafec742SSukumar Swaminathan 
7109bafec742SSukumar Swaminathan /*
7110bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7111bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7112bafec742SSukumar Swaminathan  * to be used by hardware.
7113bafec742SSukumar Swaminathan  */
7114bafec742SSukumar Swaminathan static void
ql_arm_sbuf(qlge_t * qlge,struct rx_ring * rx_ring)7115bafec742SSukumar Swaminathan ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7116bafec742SSukumar Swaminathan {
7117bafec742SSukumar Swaminathan 	struct bq_desc *sbq_desc;
7118bafec742SSukumar Swaminathan 	int i;
7119bafec742SSukumar Swaminathan 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7120bafec742SSukumar Swaminathan 	uint32_t arm_count;
7121bafec742SSukumar Swaminathan 
7122bafec742SSukumar Swaminathan 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7123bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7124bafec742SSukumar Swaminathan 	else {
7125bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
7126bafec742SSukumar Swaminathan 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7127bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7128bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7129bafec742SSukumar Swaminathan #endif
7130bafec742SSukumar Swaminathan 	}
7131bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
7132bafec742SSukumar Swaminathan 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7133bafec742SSukumar Swaminathan 		if (sbq_desc == NULL)
7134bafec742SSukumar Swaminathan 			break;
7135bafec742SSukumar Swaminathan 		/* Arm asic */
7136bafec742SSukumar Swaminathan 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7137bafec742SSukumar Swaminathan 		sbq_entry++;
7138bafec742SSukumar Swaminathan 
7139bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
7140bafec742SSukumar Swaminathan 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7141bafec742SSukumar Swaminathan 		rx_ring->sbq_prod_idx++;
7142bafec742SSukumar Swaminathan 	}
7143bafec742SSukumar Swaminathan 	ql_update_sbq_prod_idx(qlge, rx_ring);
7144bafec742SSukumar Swaminathan }
7145bafec742SSukumar Swaminathan 
7146bafec742SSukumar Swaminathan /*
7147bafec742SSukumar Swaminathan  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7148bafec742SSukumar Swaminathan  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7149bafec742SSukumar Swaminathan  * to be used by hardware.
7150bafec742SSukumar Swaminathan  */
7151bafec742SSukumar Swaminathan static void
ql_arm_lbuf(qlge_t * qlge,struct rx_ring * rx_ring)7152bafec742SSukumar Swaminathan ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7153bafec742SSukumar Swaminathan {
7154bafec742SSukumar Swaminathan 	struct bq_desc *lbq_desc;
7155bafec742SSukumar Swaminathan 	int i;
7156bafec742SSukumar Swaminathan 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7157bafec742SSukumar Swaminathan 	uint32_t arm_count;
7158bafec742SSukumar Swaminathan 
7159bafec742SSukumar Swaminathan 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7160bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7161bafec742SSukumar Swaminathan 	else {
7162bafec742SSukumar Swaminathan 		/* Adjust to a multiple of 16 */
7163bafec742SSukumar Swaminathan 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7164bafec742SSukumar Swaminathan #ifdef QLGE_LOAD_UNLOAD
7165bafec742SSukumar Swaminathan 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7166bafec742SSukumar Swaminathan #endif
7167bafec742SSukumar Swaminathan 	}
7168bafec742SSukumar Swaminathan 	for (i = 0; i < arm_count; i++) {
7169bafec742SSukumar Swaminathan 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7170bafec742SSukumar Swaminathan 		if (lbq_desc == NULL)
7171bafec742SSukumar Swaminathan 			break;
7172bafec742SSukumar Swaminathan 		/* Arm asic */
7173bafec742SSukumar Swaminathan 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7174bafec742SSukumar Swaminathan 		lbq_entry++;
7175bafec742SSukumar Swaminathan 
7176bafec742SSukumar Swaminathan 		/* link the descriptors to in_use_list */
7177bafec742SSukumar Swaminathan 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7178bafec742SSukumar Swaminathan 		rx_ring->lbq_prod_idx++;
7179bafec742SSukumar Swaminathan 	}
7180bafec742SSukumar Swaminathan 	ql_update_lbq_prod_idx(qlge, rx_ring);
7181bafec742SSukumar Swaminathan }
7182bafec742SSukumar Swaminathan 
7183bafec742SSukumar Swaminathan 
/*
 * Initializes the adapter by configuring request and response queues,
 * allocates and ARMs small and large receive buffers to the
 * hardware.
 *
 * On success the adapter is fully armed and interrupt-enabled and
 * DDI_SUCCESS is returned; on failure the asic is reset and
 * DDI_FAILURE is returned.  The qlge->hw_mutex is dropped and
 * re-acquired around interrupt enabling (see comment below), so the
 * caller is expected to hold it on entry -- TODO confirm against
 * callers.
 */
static int
ql_bringup_adapter(qlge_t *qlge)
{
	int i;

	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
		    __func__, qlge->instance);
		goto err_bringup;
	}
	qlge->sequence |= INIT_ADAPTER_UP;

#ifdef QLGE_TRACK_BUFFER_USAGE
	/* Seed the low-water statistics for every completion queue. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
		}
		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
	}
#endif
	/* Arm buffers */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].type != TX_Q) {
			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
		}
	}

	/* Enable work/request queues */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (qlge->tx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->tx_ring[i].valid_db_reg,
			    REQ_Q_VALID);
	}

	/* Enable completion queues */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		if (qlge->rx_ring[i].valid_db_reg)
			ql_write_doorbell_reg(qlge,
			    qlge->rx_ring[i].valid_db_reg,
			    RSP_Q_VALID);
	}

	/* Mark every tx ring started, under its own lock. */
	for (i = 0; i < qlge->tx_ring_count; i++) {
		mutex_enter(&qlge->tx_ring[i].tx_lock);
		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->tx_ring[i].tx_lock);
	}

	/* Mark every rx ring started, under its own lock. */
	for (i = 0; i < qlge->rx_ring_count; i++) {
		mutex_enter(&qlge->rx_ring[i].rx_lock);
		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
		mutex_exit(&qlge->rx_ring[i].rx_lock);
	}

	/* This mutex will get re-acquired in enable_completion interrupt */
	mutex_exit(&qlge->hw_mutex);
	/* Traffic can start flowing now */
	ql_enable_all_completion_interrupts(qlge);
	mutex_enter(&qlge->hw_mutex);

	ql_enable_global_interrupt(qlge);

	qlge->sequence |= ADAPTER_INIT;
	return (DDI_SUCCESS);

err_bringup:
	/* Undo any partial hardware initialization by resetting the asic. */
	(void) ql_asic_reset(qlge);
	return (DDI_FAILURE);
}
7261bafec742SSukumar Swaminathan 
7262bafec742SSukumar Swaminathan /*
7263bafec742SSukumar Swaminathan  * Initialize mutexes of each rx/tx rings
7264bafec742SSukumar Swaminathan  */
7265bafec742SSukumar Swaminathan static int
ql_init_rx_tx_locks(qlge_t * qlge)7266bafec742SSukumar Swaminathan ql_init_rx_tx_locks(qlge_t *qlge)
7267bafec742SSukumar Swaminathan {
7268bafec742SSukumar Swaminathan 	struct tx_ring *tx_ring;
7269bafec742SSukumar Swaminathan 	struct rx_ring *rx_ring;
7270bafec742SSukumar Swaminathan 	int i;
7271bafec742SSukumar Swaminathan 
7272bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->tx_ring_count; i++) {
7273bafec742SSukumar Swaminathan 		tx_ring = &qlge->tx_ring[i];
7274bafec742SSukumar Swaminathan 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7275bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7276bafec742SSukumar Swaminathan 	}
7277bafec742SSukumar Swaminathan 
7278bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7279bafec742SSukumar Swaminathan 		rx_ring = &qlge->rx_ring[i];
7280bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7281bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7282bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7283bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7284bafec742SSukumar Swaminathan 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7285bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7286bafec742SSukumar Swaminathan 	}
7287bafec742SSukumar Swaminathan 
7288bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7289bafec742SSukumar Swaminathan }
7290bafec742SSukumar Swaminathan 
/*ARGSUSED*/
/*
 * FMA error callback registered via ddi_fm_handler_register().
 * Simply call pci_ereport_post which generates ereports for errors
 * that occur in the PCI local bus configuration status registers,
 * then propagate the framework's disposition (err->fme_status) back
 * to the caller.  impl_data (the qlge_t) is unused.
 */
static int
ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
7302accf27a5SSukumar Swaminathan 
7303accf27a5SSukumar Swaminathan static void
ql_fm_init(qlge_t * qlge)7304accf27a5SSukumar Swaminathan ql_fm_init(qlge_t *qlge)
7305accf27a5SSukumar Swaminathan {
7306accf27a5SSukumar Swaminathan 	ddi_iblock_cookie_t iblk;
7307accf27a5SSukumar Swaminathan 
7308accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7309accf27a5SSukumar Swaminathan 	    qlge->instance, qlge->fm_capabilities));
7310accf27a5SSukumar Swaminathan 	/*
7311accf27a5SSukumar Swaminathan 	 * Register capabilities with IO Fault Services. The capabilities
7312accf27a5SSukumar Swaminathan 	 * set above may not be supported by the parent nexus, in that case
7313accf27a5SSukumar Swaminathan 	 * some capability bits may be cleared.
7314accf27a5SSukumar Swaminathan 	 */
7315accf27a5SSukumar Swaminathan 	if (qlge->fm_capabilities)
7316accf27a5SSukumar Swaminathan 		ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7317accf27a5SSukumar Swaminathan 
7318accf27a5SSukumar Swaminathan 	/*
7319accf27a5SSukumar Swaminathan 	 * Initialize pci ereport capabilities if ereport capable
7320accf27a5SSukumar Swaminathan 	 */
7321accf27a5SSukumar Swaminathan 	if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7322accf27a5SSukumar Swaminathan 	    DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7323accf27a5SSukumar Swaminathan 		pci_ereport_setup(qlge->dip);
7324accf27a5SSukumar Swaminathan 	}
7325accf27a5SSukumar Swaminathan 
7326accf27a5SSukumar Swaminathan 	/* Register error callback if error callback capable */
7327accf27a5SSukumar Swaminathan 	if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7328accf27a5SSukumar Swaminathan 		ddi_fm_handler_register(qlge->dip,
7329accf27a5SSukumar Swaminathan 		    ql_fm_error_cb, (void*) qlge);
7330accf27a5SSukumar Swaminathan 	}
7331accf27a5SSukumar Swaminathan 
7332accf27a5SSukumar Swaminathan 	/*
7333accf27a5SSukumar Swaminathan 	 * DDI_FLGERR_ACC indicates:
7334accf27a5SSukumar Swaminathan 	 *  Driver will check its access handle(s) for faults on
7335accf27a5SSukumar Swaminathan 	 *   a regular basis by calling ddi_fm_acc_err_get
7336accf27a5SSukumar Swaminathan 	 *  Driver is able to cope with incorrect results of I/O
7337accf27a5SSukumar Swaminathan 	 *   operations resulted from an I/O fault
7338accf27a5SSukumar Swaminathan 	 */
7339accf27a5SSukumar Swaminathan 	if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7340accf27a5SSukumar Swaminathan 		ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7341accf27a5SSukumar Swaminathan 	}
7342accf27a5SSukumar Swaminathan 
7343accf27a5SSukumar Swaminathan 	/*
7344accf27a5SSukumar Swaminathan 	 * DDI_DMA_FLAGERR indicates:
7345accf27a5SSukumar Swaminathan 	 *  Driver will check its DMA handle(s) for faults on a
7346accf27a5SSukumar Swaminathan 	 *   regular basis using ddi_fm_dma_err_get
7347accf27a5SSukumar Swaminathan 	 *  Driver is able to cope with incorrect results of DMA
7348accf27a5SSukumar Swaminathan 	 *   operations resulted from an I/O fault
7349accf27a5SSukumar Swaminathan 	 */
7350accf27a5SSukumar Swaminathan 	if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7351accf27a5SSukumar Swaminathan 		tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7352accf27a5SSukumar Swaminathan 		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7353accf27a5SSukumar Swaminathan 	}
7354accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7355accf27a5SSukumar Swaminathan 	    qlge->instance));
7356accf27a5SSukumar Swaminathan }
7357accf27a5SSukumar Swaminathan 
7358accf27a5SSukumar Swaminathan static void
ql_fm_fini(qlge_t * qlge)7359accf27a5SSukumar Swaminathan ql_fm_fini(qlge_t *qlge)
7360accf27a5SSukumar Swaminathan {
7361accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7362accf27a5SSukumar Swaminathan 	    qlge->instance));
7363accf27a5SSukumar Swaminathan 	/* Only unregister FMA capabilities if we registered some */
7364accf27a5SSukumar Swaminathan 	if (qlge->fm_capabilities) {
7365accf27a5SSukumar Swaminathan 
7366accf27a5SSukumar Swaminathan 		/*
7367accf27a5SSukumar Swaminathan 		 * Release any resources allocated by pci_ereport_setup()
7368accf27a5SSukumar Swaminathan 		 */
7369accf27a5SSukumar Swaminathan 		if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7370accf27a5SSukumar Swaminathan 		    DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7371accf27a5SSukumar Swaminathan 			pci_ereport_teardown(qlge->dip);
7372accf27a5SSukumar Swaminathan 
7373accf27a5SSukumar Swaminathan 		/*
7374accf27a5SSukumar Swaminathan 		 * Un-register error callback if error callback capable
7375accf27a5SSukumar Swaminathan 		 */
7376accf27a5SSukumar Swaminathan 		if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7377accf27a5SSukumar Swaminathan 			ddi_fm_handler_unregister(qlge->dip);
7378accf27a5SSukumar Swaminathan 
7379accf27a5SSukumar Swaminathan 		/* Unregister from IO Fault Services */
7380accf27a5SSukumar Swaminathan 		ddi_fm_fini(qlge->dip);
7381accf27a5SSukumar Swaminathan 	}
7382accf27a5SSukumar Swaminathan 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7383accf27a5SSukumar Swaminathan 	    qlge->instance));
7384accf27a5SSukumar Swaminathan }
7385bafec742SSukumar Swaminathan /*
7386bafec742SSukumar Swaminathan  * ql_attach - Driver attach.
7387bafec742SSukumar Swaminathan  */
7388bafec742SSukumar Swaminathan static int
ql_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)7389bafec742SSukumar Swaminathan ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7390bafec742SSukumar Swaminathan {
7391bafec742SSukumar Swaminathan 	int instance;
7392accf27a5SSukumar Swaminathan 	qlge_t *qlge = NULL;
7393bafec742SSukumar Swaminathan 	int rval;
7394bafec742SSukumar Swaminathan 	uint16_t w;
7395bafec742SSukumar Swaminathan 	mac_register_t *macp = NULL;
7396accf27a5SSukumar Swaminathan 	uint32_t data;
7397accf27a5SSukumar Swaminathan 
7398bafec742SSukumar Swaminathan 	rval = DDI_FAILURE;
7399bafec742SSukumar Swaminathan 
7400bafec742SSukumar Swaminathan 	/* first get the instance */
7401bafec742SSukumar Swaminathan 	instance = ddi_get_instance(dip);
7402bafec742SSukumar Swaminathan 
7403bafec742SSukumar Swaminathan 	switch (cmd) {
7404bafec742SSukumar Swaminathan 	case DDI_ATTACH:
7405bafec742SSukumar Swaminathan 		/*
7406bafec742SSukumar Swaminathan 		 * Allocate our per-device-instance structure
7407bafec742SSukumar Swaminathan 		 */
7408bafec742SSukumar Swaminathan 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7409bafec742SSukumar Swaminathan 		ASSERT(qlge != NULL);
7410bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7411bafec742SSukumar Swaminathan 
7412bafec742SSukumar Swaminathan 		qlge->dip = dip;
7413bafec742SSukumar Swaminathan 		qlge->instance = instance;
7414accf27a5SSukumar Swaminathan 		/* Set up the coalescing parameters. */
7415accf27a5SSukumar Swaminathan 		qlge->ql_dbgprnt = 0;
7416accf27a5SSukumar Swaminathan #if QL_DEBUG
7417accf27a5SSukumar Swaminathan 		qlge->ql_dbgprnt = QL_DEBUG;
7418accf27a5SSukumar Swaminathan #endif /* QL_DEBUG */
7419accf27a5SSukumar Swaminathan 
7420accf27a5SSukumar Swaminathan 		/*
7421accf27a5SSukumar Swaminathan 		 * Initialize for fma support
7422accf27a5SSukumar Swaminathan 		 */
7423accf27a5SSukumar Swaminathan 		/* fault management (fm) capabilities. */
7424accf27a5SSukumar Swaminathan 		qlge->fm_capabilities =
7425accf27a5SSukumar Swaminathan 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
7426accf27a5SSukumar Swaminathan 		data = ql_get_prop(qlge, "fm-capable");
7427accf27a5SSukumar Swaminathan 		if (data <= 0xf) {
7428accf27a5SSukumar Swaminathan 			qlge->fm_capabilities = data;
7429accf27a5SSukumar Swaminathan 		}
7430accf27a5SSukumar Swaminathan 		ql_fm_init(qlge);
7431accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_FM;
7432accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7433accf27a5SSukumar Swaminathan 		    qlge->instance));
7434bafec742SSukumar Swaminathan 
7435bafec742SSukumar Swaminathan 		/*
7436bafec742SSukumar Swaminathan 		 * Setup the ISP8x00 registers address mapping to be
7437bafec742SSukumar Swaminathan 		 * accessed by this particular driver.
7438bafec742SSukumar Swaminathan 		 * 0x0   Configuration Space
7439bafec742SSukumar Swaminathan 		 * 0x1   I/O Space
7440bafec742SSukumar Swaminathan 		 * 0x2   1st Memory Space address - Control Register Set
7441bafec742SSukumar Swaminathan 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
7442bafec742SSukumar Swaminathan 		 */
7443bafec742SSukumar Swaminathan 		w = 2;
7444bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7445bafec742SSukumar Swaminathan 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
7446bafec742SSukumar Swaminathan 		    &qlge->dev_handle) != DDI_SUCCESS) {
7447bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
7448bafec742SSukumar Swaminathan 			    "registers", ADAPTER_NAME, instance);
7449bafec742SSukumar Swaminathan 			break;
7450bafec742SSukumar Swaminathan 		}
7451bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7452bafec742SSukumar Swaminathan 		    qlge->iobase));
7453bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_REGS_SETUP;
7454bafec742SSukumar Swaminathan 
7455bafec742SSukumar Swaminathan 		/* map Doorbell memory space */
7456bafec742SSukumar Swaminathan 		w = 3;
7457bafec742SSukumar Swaminathan 		if (ddi_regs_map_setup(dip, w,
7458bafec742SSukumar Swaminathan 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7459bafec742SSukumar Swaminathan 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
7460bafec742SSukumar Swaminathan 		    &ql_dev_acc_attr,
7461bafec742SSukumar Swaminathan 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7462bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7463bafec742SSukumar Swaminathan 			    "registers",
7464bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7465bafec742SSukumar Swaminathan 			break;
7466bafec742SSukumar Swaminathan 		}
7467bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7468bafec742SSukumar Swaminathan 		    qlge->doorbell_reg_iobase));
7469bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7470bafec742SSukumar Swaminathan 
7471bafec742SSukumar Swaminathan 		/*
7472bafec742SSukumar Swaminathan 		 * Allocate a macinfo structure for this instance
7473bafec742SSukumar Swaminathan 		 */
7474bafec742SSukumar Swaminathan 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7475bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7476bafec742SSukumar Swaminathan 			    __func__, instance);
7477accf27a5SSukumar Swaminathan 			break;
7478bafec742SSukumar Swaminathan 		}
7479bafec742SSukumar Swaminathan 		/* save adapter status to dip private data */
7480bafec742SSukumar Swaminathan 		ddi_set_driver_private(dip, qlge);
7481bafec742SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7482bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7483bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_ALLOC;
7484bafec742SSukumar Swaminathan 
7485bafec742SSukumar Swaminathan 		/*
7486bafec742SSukumar Swaminathan 		 * Attach this instance of the device
7487bafec742SSukumar Swaminathan 		 */
7488bafec742SSukumar Swaminathan 		/* Setup PCI Local Bus Configuration resource. */
7489bafec742SSukumar Swaminathan 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7490bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7491bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7492accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
7493accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7494accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
7495accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
7496accf27a5SSukumar Swaminathan 			}
7497bafec742SSukumar Swaminathan 			break;
7498bafec742SSukumar Swaminathan 		}
7499bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7500accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7501accf27a5SSukumar Swaminathan 		    instance));
7502bafec742SSukumar Swaminathan 
7503bafec742SSukumar Swaminathan 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
7504bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7505bafec742SSukumar Swaminathan 			    "instance", ADAPTER_NAME, instance);
7506accf27a5SSukumar Swaminathan 			if (qlge->fm_enable) {
7507accf27a5SSukumar Swaminathan 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7508accf27a5SSukumar Swaminathan 				ddi_fm_service_impact(qlge->dip,
7509accf27a5SSukumar Swaminathan 				    DDI_SERVICE_LOST);
7510accf27a5SSukumar Swaminathan 			}
7511bafec742SSukumar Swaminathan 			break;
7512bafec742SSukumar Swaminathan 		}
7513accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7514accf27a5SSukumar Swaminathan 		    instance));
7515bafec742SSukumar Swaminathan 
7516bafec742SSukumar Swaminathan 		/* Setup interrupt vectors */
7517bafec742SSukumar Swaminathan 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7518bafec742SSukumar Swaminathan 			break;
7519bafec742SSukumar Swaminathan 		}
7520bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_INTR_ALLOC;
7521accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7522accf27a5SSukumar Swaminathan 		    instance));
7523bafec742SSukumar Swaminathan 
7524bafec742SSukumar Swaminathan 		/* Configure queues */
7525bafec742SSukumar Swaminathan 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7526bafec742SSukumar Swaminathan 			break;
7527bafec742SSukumar Swaminathan 		}
7528bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_SETUP_RINGS;
7529accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7530accf27a5SSukumar Swaminathan 		    instance));
7531accf27a5SSukumar Swaminathan 
7532bafec742SSukumar Swaminathan 		/*
7533accf27a5SSukumar Swaminathan 		 * Allocate memory resources
7534bafec742SSukumar Swaminathan 		 */
7535accf27a5SSukumar Swaminathan 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7536accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7537accf27a5SSukumar Swaminathan 			    __func__, qlge->instance);
7538bafec742SSukumar Swaminathan 			break;
7539bafec742SSukumar Swaminathan 		}
7540accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_MEMORY_ALLOC;
7541accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7542accf27a5SSukumar Swaminathan 		    instance));
7543bafec742SSukumar Swaminathan 
7544accf27a5SSukumar Swaminathan 		/*
7545accf27a5SSukumar Swaminathan 		 * Map queues to interrupt vectors
7546accf27a5SSukumar Swaminathan 		 */
7547accf27a5SSukumar Swaminathan 		ql_resolve_queues_to_irqs(qlge);
7548bafec742SSukumar Swaminathan 
7549bafec742SSukumar Swaminathan 		/* Initialize mutex, need the interrupt priority */
75500662fbf4SSukumar Swaminathan 		(void) ql_init_rx_tx_locks(qlge);
7551bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_LOCKS_CREATED;
7552accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7553accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7554bafec742SSukumar Swaminathan 
7555bafec742SSukumar Swaminathan 		/*
7556bafec742SSukumar Swaminathan 		 * Use a soft interrupt to do something that we do not want
7557bafec742SSukumar Swaminathan 		 * to do in regular network functions or with mutexs being held
7558bafec742SSukumar Swaminathan 		 */
7559bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7560bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7561bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7562bafec742SSukumar Swaminathan 			break;
7563bafec742SSukumar Swaminathan 		}
7564bafec742SSukumar Swaminathan 
7565bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7566bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7567bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7568bafec742SSukumar Swaminathan 			break;
7569bafec742SSukumar Swaminathan 		}
7570bafec742SSukumar Swaminathan 
7571bafec742SSukumar Swaminathan 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7572bafec742SSukumar Swaminathan 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7573bafec742SSukumar Swaminathan 		    != DDI_SUCCESS) {
7574bafec742SSukumar Swaminathan 			break;
7575bafec742SSukumar Swaminathan 		}
7576bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7577accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7578accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7579bafec742SSukumar Swaminathan 
7580bafec742SSukumar Swaminathan 		/*
7581bafec742SSukumar Swaminathan 		 * mutex to protect the adapter state structure.
7582bafec742SSukumar Swaminathan 		 * initialize mutexes according to the interrupt priority
7583bafec742SSukumar Swaminathan 		 */
7584bafec742SSukumar Swaminathan 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7585bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7586bafec742SSukumar Swaminathan 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7587bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7588bafec742SSukumar Swaminathan 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7589bafec742SSukumar Swaminathan 		    DDI_INTR_PRI(qlge->intr_pri));
7590bafec742SSukumar Swaminathan 
7591bafec742SSukumar Swaminathan 		/* Mailbox wait and interrupt conditional variable. */
7592bafec742SSukumar Swaminathan 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7593bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MUTEX;
7594accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7595accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7596bafec742SSukumar Swaminathan 
7597bafec742SSukumar Swaminathan 		/*
7598bafec742SSukumar Swaminathan 		 * KStats
7599bafec742SSukumar Swaminathan 		 */
7600bafec742SSukumar Swaminathan 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7601bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): KState initialization failed",
7602bafec742SSukumar Swaminathan 			    ADAPTER_NAME, instance);
7603bafec742SSukumar Swaminathan 			break;
7604bafec742SSukumar Swaminathan 		}
7605bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_KSTATS;
7606accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7607accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7608bafec742SSukumar Swaminathan 
7609bafec742SSukumar Swaminathan 		/*
7610bafec742SSukumar Swaminathan 		 * Initialize gld macinfo structure
7611bafec742SSukumar Swaminathan 		 */
7612bafec742SSukumar Swaminathan 		ql_gld3_init(qlge, macp);
7613accf27a5SSukumar Swaminathan 		/*
7614accf27a5SSukumar Swaminathan 		 * Add interrupt handlers
7615accf27a5SSukumar Swaminathan 		 */
7616accf27a5SSukumar Swaminathan 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7617accf27a5SSukumar Swaminathan 			cmn_err(CE_WARN, "Failed to add interrupt "
7618accf27a5SSukumar Swaminathan 			    "handlers");
7619accf27a5SSukumar Swaminathan 			break;
7620accf27a5SSukumar Swaminathan 		}
7621accf27a5SSukumar Swaminathan 		qlge->sequence |= INIT_ADD_INTERRUPT;
7622accf27a5SSukumar Swaminathan 		QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7623accf27a5SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7624bafec742SSukumar Swaminathan 
7625accf27a5SSukumar Swaminathan 		/*
7626accf27a5SSukumar Swaminathan 		 * MAC Register
7627accf27a5SSukumar Swaminathan 		 */
7628bafec742SSukumar Swaminathan 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7629bafec742SSukumar Swaminathan 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
7630bafec742SSukumar Swaminathan 			    __func__, instance);
7631bafec742SSukumar Swaminathan 			break;
7632bafec742SSukumar Swaminathan 		}
7633bafec742SSukumar Swaminathan 		qlge->sequence |= INIT_MAC_REGISTERED;
7634bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7635bafec742SSukumar Swaminathan 		    ADAPTER_NAME, instance));
7636bafec742SSukumar Swaminathan 
7637bafec742SSukumar Swaminathan 		mac_free(macp);
7638bafec742SSukumar Swaminathan 		macp = NULL;
7639bafec742SSukumar Swaminathan 
7640bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_ATTACHED;
7641bafec742SSukumar Swaminathan 
7642bafec742SSukumar Swaminathan 		ddi_report_dev(dip);
7643bafec742SSukumar Swaminathan 
7644bafec742SSukumar Swaminathan 		rval = DDI_SUCCESS;
7645accf27a5SSukumar Swaminathan 
7646bafec742SSukumar Swaminathan 	break;
7647bafec742SSukumar Swaminathan /*
7648bafec742SSukumar Swaminathan  * DDI_RESUME
7649bafec742SSukumar Swaminathan  * When called  with  cmd  set  to  DDI_RESUME,  attach()  must
7650bafec742SSukumar Swaminathan  * restore  the hardware state of a device (power may have been
7651bafec742SSukumar Swaminathan  * removed from the device), allow  pending  requests  to  con-
7652bafec742SSukumar Swaminathan  * tinue,  and  service  new requests. In this case, the driver
7653bafec742SSukumar Swaminathan  * must not  make  any  assumptions  about  the  state  of  the
7654bafec742SSukumar Swaminathan  * hardware,  but  must  restore the state of the device except
7655bafec742SSukumar Swaminathan  * for the power level of components.
7656bafec742SSukumar Swaminathan  *
7657bafec742SSukumar Swaminathan  */
7658bafec742SSukumar Swaminathan 	case DDI_RESUME:
7659bafec742SSukumar Swaminathan 
7660bafec742SSukumar Swaminathan 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7661bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7662bafec742SSukumar Swaminathan 
7663bafec742SSukumar Swaminathan 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7664bafec742SSukumar Swaminathan 		    __func__, qlge->instance));
7665bafec742SSukumar Swaminathan 
7666bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
7667bafec742SSukumar Swaminathan 		rval = ql_do_start(qlge);
7668bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
7669bafec742SSukumar Swaminathan 		break;
7670bafec742SSukumar Swaminathan 
7671bafec742SSukumar Swaminathan 	default:
7672bafec742SSukumar Swaminathan 		break;
7673bafec742SSukumar Swaminathan 	}
7674accf27a5SSukumar Swaminathan 
7675accf27a5SSukumar Swaminathan 	/* if failed to attach */
7676accf27a5SSukumar Swaminathan 	if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7677accf27a5SSukumar Swaminathan 		cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7678accf27a5SSukumar Swaminathan 		    qlge->sequence);
7679accf27a5SSukumar Swaminathan 		ql_free_resources(qlge);
7680accf27a5SSukumar Swaminathan 	}
7681accf27a5SSukumar Swaminathan 
7682bafec742SSukumar Swaminathan 	return (rval);
7683bafec742SSukumar Swaminathan }
7684bafec742SSukumar Swaminathan 
7685bafec742SSukumar Swaminathan /*
7686bafec742SSukumar Swaminathan  * Unbind all pending tx dma handles during driver bring down
7687bafec742SSukumar Swaminathan  */
7688bafec742SSukumar Swaminathan static void
ql_unbind_pending_tx_dma_handle(struct tx_ring * tx_ring)7689bafec742SSukumar Swaminathan ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7690bafec742SSukumar Swaminathan {
7691bafec742SSukumar Swaminathan 	struct tx_ring_desc *tx_ring_desc;
7692bafec742SSukumar Swaminathan 	int i, j;
7693bafec742SSukumar Swaminathan 
7694bafec742SSukumar Swaminathan 	if (tx_ring->wq_desc) {
7695bafec742SSukumar Swaminathan 		tx_ring_desc = tx_ring->wq_desc;
7696bafec742SSukumar Swaminathan 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7697bafec742SSukumar Swaminathan 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7698bafec742SSukumar Swaminathan 				if (tx_ring_desc->tx_dma_handle[j]) {
7699bafec742SSukumar Swaminathan 					(void) ddi_dma_unbind_handle(
7700bafec742SSukumar Swaminathan 					    tx_ring_desc->tx_dma_handle[j]);
7701bafec742SSukumar Swaminathan 				}
7702bafec742SSukumar Swaminathan 			}
7703bafec742SSukumar Swaminathan 			tx_ring_desc->tx_dma_handle_used = 0;
7704bafec742SSukumar Swaminathan 		} /* end of for loop */
7705bafec742SSukumar Swaminathan 	}
7706bafec742SSukumar Swaminathan }
/*
 * Wait for all the packets sent to the chip to finish transmission
 * to prevent buffers to be unmapped before or during a transmit operation
 *
 * Returns DDI_SUCCESS when every tx ring has drained, DDI_FAILURE
 * otherwise.  In either case all request-queue doorbells are disabled
 * before returning.
 */
static int
ql_wait_tx_quiesce(qlge_t *qlge)
{
	int count = MAX_TX_WAIT_COUNT, i;
	int rings_done;
	volatile struct tx_ring *tx_ring;
	uint32_t consumer_idx;
	uint32_t producer_idx;
	uint32_t temp;
	int done = 0;
	int rval = DDI_FAILURE;

	/*
	 * Poll each tx ring's producer/consumer doorbell until the free
	 * count plus the not-yet-fetched request count covers the whole
	 * queue, i.e. nothing is left in flight.
	 *
	 * NOTE(review): "done" is set as soon as one busy ring is found
	 * and is never cleared, so the while loop exits after a single
	 * qlge_delay(100) instead of re-polling up to MAX_TX_WAIT_COUNT
	 * times -- confirm whether this early exit is intentional.
	 */
	while (!done) {
		rings_done = 0;

		for (i = 0; i < qlge->tx_ring_count; i++) {
			tx_ring = &qlge->tx_ring[i];
			/* doorbell: low 16 bits producer, high 16 consumer */
			temp = ql_read_doorbell_reg(qlge,
			    tx_ring->prod_idx_db_reg);
			producer_idx = temp & 0x0000ffff;
			consumer_idx = (temp >> 16);

			if (qlge->isr_stride) {
				struct rx_ring *ob_ring;
				/* reap completions left on the outbound ring */
				ob_ring = &qlge->rx_ring[tx_ring->cq_id];
				if (producer_idx != ob_ring->cnsmr_idx) {
					cmn_err(CE_NOTE, " force clean \n");
					(void) ql_clean_outbound_rx_ring(
					    ob_ring);
				}
			}
			/*
			 * Get the pending iocb count, ones which have not been
			 * pulled down by the chip
			 */
			if (producer_idx >= consumer_idx)
				temp = (producer_idx - consumer_idx);
			else
				temp = (tx_ring->wq_len - consumer_idx) +
				    producer_idx;

			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
				rings_done++;
			else {
				done = 1;
				break;
			}
		}

		/* If all the rings are done */
		if (rings_done >= qlge->tx_ring_count) {
#ifdef QLGE_LOAD_UNLOAD
			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
			    __func__, qlge->instance);
#endif
			rval = DDI_SUCCESS;
			break;
		}

		qlge_delay(100);

		count--;
		if (!count) {

			count = MAX_TX_WAIT_COUNT;
#ifdef QLGE_LOAD_UNLOAD
			volatile struct rx_ring *rx_ring;
			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
			    " Transmits on queue %d to complete .\n",
			    __func__, qlge->instance,
			    (qlge->tx_ring[i].wq_len -
			    qlge->tx_ring[i].tx_free_count),
			    i);

			rx_ring = &qlge->rx_ring[i+1];
			temp = ql_read_doorbell_reg(qlge,
			    rx_ring->cnsmr_idx_db_reg);
			consumer_idx = temp & 0x0000ffff;
			producer_idx = (temp >> 16);
			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
			    " Producer %d, Consumer %d\n",
			    __func__, qlge->instance,
			    i+1,
			    producer_idx, consumer_idx);

			temp = ql_read_doorbell_reg(qlge,
			    tx_ring->prod_idx_db_reg);
			producer_idx = temp & 0x0000ffff;
			consumer_idx = (temp >> 16);
			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
			    " Producer %d, Consumer %d\n",
			    __func__, qlge->instance, i,
			    producer_idx, consumer_idx);
#endif

			/* For now move on */
			break;
		}
	}
	/* Stop the request queue */
	mutex_enter(&qlge->hw_mutex);
	for (i = 0; i < qlge->tx_ring_count; i++) {
		if (qlge->tx_ring[i].valid_db_reg) {
			ql_write_doorbell_reg(qlge,
			    qlge->tx_ring[i].valid_db_reg, 0);
		}
	}
	mutex_exit(&qlge->hw_mutex);
	return (rval);
}
7821bafec742SSukumar Swaminathan 
7822bafec742SSukumar Swaminathan /*
7823bafec742SSukumar Swaminathan  * Wait for all the receives indicated to the stack to come back
7824bafec742SSukumar Swaminathan  */
7825bafec742SSukumar Swaminathan static int
ql_wait_rx_complete(qlge_t * qlge)7826bafec742SSukumar Swaminathan ql_wait_rx_complete(qlge_t *qlge)
7827bafec742SSukumar Swaminathan {
7828bafec742SSukumar Swaminathan 	int i;
7829bafec742SSukumar Swaminathan 	/* Disable all the completion queues */
7830bafec742SSukumar Swaminathan 	mutex_enter(&qlge->hw_mutex);
7831bafec742SSukumar Swaminathan 	for (i = 0; i < qlge->rx_ring_count; i++) {
7832bafec742SSukumar Swaminathan 		if (qlge->rx_ring[i].valid_db_reg) {
7833bafec742SSukumar Swaminathan 			ql_write_doorbell_reg(qlge,
7834bafec742SSukumar Swaminathan 			    qlge->rx_ring[i].valid_db_reg, 0);
7835bafec742SSukumar Swaminathan 		}
7836bafec742SSukumar Swaminathan 	}
7837bafec742SSukumar Swaminathan 	mutex_exit(&qlge->hw_mutex);
7838bafec742SSukumar Swaminathan 
7839bafec742SSukumar Swaminathan 	/* Wait for OS to return all rx buffers */
7840bafec742SSukumar Swaminathan 	qlge_delay(QL_ONE_SEC_DELAY);
7841bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
7842bafec742SSukumar Swaminathan }
7843bafec742SSukumar Swaminathan 
/*
 * stop the driver
 *
 * Quiesces a running adapter: stops packet routing, marks every tx/rx
 * ring stopped, drains transmits, disables interrupts, waits for rx
 * buffers to return, resets the ASIC and unbinds pending tx DMA
 * handles.  No-op (beyond setting mac_flags) if ADAPTER_INIT is not
 * set in qlge->sequence.  Returns DDI_SUCCESS or an error from
 * ql_sem_spinlock()/ql_wait_rx_complete().
 */
static int
ql_bringdown_adapter(qlge_t *qlge)
{
	int i;
	int status = DDI_SUCCESS;

	qlge->mac_flags = QL_MAC_BRINGDOWN;
	if (qlge->sequence & ADAPTER_INIT) {
		/* stop forwarding external packets to driver */
		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
		if (status)
			return (status);
		(void) ql_stop_routing(qlge);
		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
		/*
		 * Set the flag for receive and transmit
		 * operations to cease
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			mutex_enter(&qlge->tx_ring[i].tx_lock);
			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->tx_ring[i].tx_lock);
		}

		for (i = 0; i < qlge->rx_ring_count; i++) {
			mutex_enter(&qlge->rx_ring[i].rx_lock);
			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
			mutex_exit(&qlge->rx_ring[i].rx_lock);
		}

		/*
		 * Need interrupts to be running while the transmit
		 * completions are cleared. Wait for the packets
		 * queued to the chip to be sent out
		 */
		(void) ql_wait_tx_quiesce(qlge);
		/* Interrupts not needed from now */
		ql_disable_all_completion_interrupts(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Disable Global interrupt */
		ql_disable_global_interrupt(qlge);
		mutex_exit(&qlge->hw_mutex);

		/* Wait for all the indicated packets to come back */
		status = ql_wait_rx_complete(qlge);

		mutex_enter(&qlge->hw_mutex);
		/* Reset adapter */
		(void) ql_asic_reset(qlge);
		/*
		 * Unbind all tx dma handles to prevent pending tx descriptors'
		 * dma handles from being re-used.
		 */
		for (i = 0; i < qlge->tx_ring_count; i++) {
			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
		}

		/* adapter must be re-initialized before further use */
		qlge->sequence &= ~ADAPTER_INIT;

		mutex_exit(&qlge->hw_mutex);
	}
	return (status);
}
7911bafec742SSukumar Swaminathan 
7912bafec742SSukumar Swaminathan /*
7913bafec742SSukumar Swaminathan  * ql_detach
7914bafec742SSukumar Swaminathan  * Used to remove all the states associated with a given
7915bafec742SSukumar Swaminathan  * instances of a device node prior to the removal of that
7916bafec742SSukumar Swaminathan  * instance from the system.
7917bafec742SSukumar Swaminathan  */
7918bafec742SSukumar Swaminathan static int
ql_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)7919bafec742SSukumar Swaminathan ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7920bafec742SSukumar Swaminathan {
7921bafec742SSukumar Swaminathan 	qlge_t *qlge;
7922bafec742SSukumar Swaminathan 	int rval;
7923bafec742SSukumar Swaminathan 
7924bafec742SSukumar Swaminathan 	rval = DDI_SUCCESS;
7925bafec742SSukumar Swaminathan 
7926bafec742SSukumar Swaminathan 	switch (cmd) {
7927bafec742SSukumar Swaminathan 	case DDI_DETACH:
7928bafec742SSukumar Swaminathan 
7929bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7930bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7931bafec742SSukumar Swaminathan 		rval = ql_bringdown_adapter(qlge);
7932bafec742SSukumar Swaminathan 		if (rval != DDI_SUCCESS)
7933bafec742SSukumar Swaminathan 			break;
7934bafec742SSukumar Swaminathan 
7935bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_DETACH;
7936bafec742SSukumar Swaminathan 
7937bafec742SSukumar Swaminathan 		/* free memory resources */
7938bafec742SSukumar Swaminathan 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7939bafec742SSukumar Swaminathan 			ql_free_mem_resources(qlge);
7940bafec742SSukumar Swaminathan 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7941bafec742SSukumar Swaminathan 		}
7942accf27a5SSukumar Swaminathan 		ql_free_resources(qlge);
7943bafec742SSukumar Swaminathan 
7944bafec742SSukumar Swaminathan 		break;
7945bafec742SSukumar Swaminathan 
7946bafec742SSukumar Swaminathan 	case DDI_SUSPEND:
7947bafec742SSukumar Swaminathan 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7948bafec742SSukumar Swaminathan 			return (DDI_FAILURE);
7949bafec742SSukumar Swaminathan 
7950bafec742SSukumar Swaminathan 		mutex_enter(&qlge->gen_mutex);
7951bafec742SSukumar Swaminathan 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7952bafec742SSukumar Swaminathan 		    (qlge->mac_flags == QL_MAC_STARTED)) {
79530662fbf4SSukumar Swaminathan 			(void) ql_do_stop(qlge);
7954bafec742SSukumar Swaminathan 		}
7955bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_SUSPENDED;
7956bafec742SSukumar Swaminathan 		mutex_exit(&qlge->gen_mutex);
7957bafec742SSukumar Swaminathan 
7958bafec742SSukumar Swaminathan 		break;
7959bafec742SSukumar Swaminathan 	default:
7960bafec742SSukumar Swaminathan 		rval = DDI_FAILURE;
7961bafec742SSukumar Swaminathan 		break;
7962bafec742SSukumar Swaminathan 	}
7963bafec742SSukumar Swaminathan 
7964bafec742SSukumar Swaminathan 	return (rval);
7965bafec742SSukumar Swaminathan }
7966bafec742SSukumar Swaminathan 
7967bafec742SSukumar Swaminathan /*
7968bafec742SSukumar Swaminathan  * quiesce(9E) entry point.
7969bafec742SSukumar Swaminathan  *
7970bafec742SSukumar Swaminathan  * This function is called when the system is single-threaded at high
7971bafec742SSukumar Swaminathan  * PIL with preemption disabled. Therefore, this function must not be
7972bafec742SSukumar Swaminathan  * blocked.
7973bafec742SSukumar Swaminathan  *
7974bafec742SSukumar Swaminathan  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7975bafec742SSukumar Swaminathan  */
7976bafec742SSukumar Swaminathan int
ql_quiesce(dev_info_t * dip)7977bafec742SSukumar Swaminathan ql_quiesce(dev_info_t *dip)
7978bafec742SSukumar Swaminathan {
7979bafec742SSukumar Swaminathan 	qlge_t *qlge;
7980bafec742SSukumar Swaminathan 	int i;
7981bafec742SSukumar Swaminathan 
7982bafec742SSukumar Swaminathan 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7983bafec742SSukumar Swaminathan 		return (DDI_FAILURE);
7984bafec742SSukumar Swaminathan 
7985bafec742SSukumar Swaminathan 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7986bafec742SSukumar Swaminathan 		/* stop forwarding external packets to driver */
79870662fbf4SSukumar Swaminathan 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
79880662fbf4SSukumar Swaminathan 		(void) ql_stop_routing(qlge);
7989bafec742SSukumar Swaminathan 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7990bafec742SSukumar Swaminathan 		/* Stop all the request queues */
7991bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->tx_ring_count; i++) {
7992bafec742SSukumar Swaminathan 			if (qlge->tx_ring[i].valid_db_reg) {
7993bafec742SSukumar Swaminathan 				ql_write_doorbell_reg(qlge,
7994bafec742SSukumar Swaminathan 				    qlge->tx_ring[i].valid_db_reg, 0);
7995bafec742SSukumar Swaminathan 			}
7996bafec742SSukumar Swaminathan 		}
7997bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY/4);
7998bafec742SSukumar Swaminathan 		/* Interrupts not needed from now */
7999bafec742SSukumar Swaminathan 		/* Disable MPI interrupt */
8000bafec742SSukumar Swaminathan 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
8001bafec742SSukumar Swaminathan 		    (INTR_MASK_PI << 16));
8002bafec742SSukumar Swaminathan 		ql_disable_global_interrupt(qlge);
8003bafec742SSukumar Swaminathan 
8004bafec742SSukumar Swaminathan 		/* Disable all the rx completion queues */
8005bafec742SSukumar Swaminathan 		for (i = 0; i < qlge->rx_ring_count; i++) {
8006bafec742SSukumar Swaminathan 			if (qlge->rx_ring[i].valid_db_reg) {
8007bafec742SSukumar Swaminathan 				ql_write_doorbell_reg(qlge,
8008bafec742SSukumar Swaminathan 				    qlge->rx_ring[i].valid_db_reg, 0);
8009bafec742SSukumar Swaminathan 			}
8010bafec742SSukumar Swaminathan 		}
8011bafec742SSukumar Swaminathan 		qlge_delay(QL_ONE_SEC_DELAY/4);
8012bafec742SSukumar Swaminathan 		qlge->mac_flags = QL_MAC_STOPPED;
8013bafec742SSukumar Swaminathan 		/* Reset adapter */
80140662fbf4SSukumar Swaminathan 		(void) ql_asic_reset(qlge);
8015bafec742SSukumar Swaminathan 		qlge_delay(100);
8016bafec742SSukumar Swaminathan 	}
8017bafec742SSukumar Swaminathan 
8018bafec742SSukumar Swaminathan 	return (DDI_SUCCESS);
8019bafec742SSukumar Swaminathan }
8020bafec742SSukumar Swaminathan 
8021bafec742SSukumar Swaminathan QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
8022bafec742SSukumar Swaminathan 
8023bafec742SSukumar Swaminathan /*
8024bafec742SSukumar Swaminathan  * Loadable Driver Interface Structures.
8025bafec742SSukumar Swaminathan  * Declare and initialize the module configuration section...
8026bafec742SSukumar Swaminathan  */
8027bafec742SSukumar Swaminathan static struct modldrv modldrv = {
8028bafec742SSukumar Swaminathan 	&mod_driverops,		/* type of module: driver */
8029bafec742SSukumar Swaminathan 	version,		/* name of module */
8030bafec742SSukumar Swaminathan 	&ql_ops			/* driver dev_ops */
8031bafec742SSukumar Swaminathan };
8032bafec742SSukumar Swaminathan 
8033bafec742SSukumar Swaminathan static struct modlinkage modlinkage = {
803495369d7bSToomas Soome 	MODREV_1,	&modldrv,	NULL
8035bafec742SSukumar Swaminathan };
8036bafec742SSukumar Swaminathan 
8037bafec742SSukumar Swaminathan /*
8038bafec742SSukumar Swaminathan  * Loadable Module Routines
8039bafec742SSukumar Swaminathan  */
8040bafec742SSukumar Swaminathan 
8041bafec742SSukumar Swaminathan /*
8042bafec742SSukumar Swaminathan  * _init
8043bafec742SSukumar Swaminathan  * Initializes a loadable module. It is called before any other
8044bafec742SSukumar Swaminathan  * routine in a loadable module.
8045bafec742SSukumar Swaminathan  */
8046bafec742SSukumar Swaminathan int
_init(void)8047bafec742SSukumar Swaminathan _init(void)
8048bafec742SSukumar Swaminathan {
8049bafec742SSukumar Swaminathan 	int rval;
8050bafec742SSukumar Swaminathan 
8051bafec742SSukumar Swaminathan 	mac_init_ops(&ql_ops, ADAPTER_NAME);
8052bafec742SSukumar Swaminathan 	rval = mod_install(&modlinkage);
8053bafec742SSukumar Swaminathan 	if (rval != DDI_SUCCESS) {
8054bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
8055bafec742SSukumar Swaminathan 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8056bafec742SSukumar Swaminathan 		    ADAPTER_NAME);
8057bafec742SSukumar Swaminathan 	}
8058bafec742SSukumar Swaminathan 
8059bafec742SSukumar Swaminathan 	return (rval);
8060bafec742SSukumar Swaminathan }
8061bafec742SSukumar Swaminathan 
8062bafec742SSukumar Swaminathan /*
8063bafec742SSukumar Swaminathan  * _fini
8064bafec742SSukumar Swaminathan  * Prepares a module for unloading. It is called when the system
8065bafec742SSukumar Swaminathan  * wants to unload a module. If the module determines that it can
8066bafec742SSukumar Swaminathan  * be unloaded, then _fini() returns the value returned by
8067bafec742SSukumar Swaminathan  * mod_remove(). Upon successful return from _fini() no other
8068bafec742SSukumar Swaminathan  * routine in the module will be called before _init() is called.
8069bafec742SSukumar Swaminathan  */
8070bafec742SSukumar Swaminathan int
_fini(void)8071bafec742SSukumar Swaminathan _fini(void)
8072bafec742SSukumar Swaminathan {
8073bafec742SSukumar Swaminathan 	int rval;
8074bafec742SSukumar Swaminathan 
8075bafec742SSukumar Swaminathan 	rval = mod_remove(&modlinkage);
8076bafec742SSukumar Swaminathan 	if (rval == DDI_SUCCESS) {
8077bafec742SSukumar Swaminathan 		mac_fini_ops(&ql_ops);
8078bafec742SSukumar Swaminathan 	}
8079bafec742SSukumar Swaminathan 
8080bafec742SSukumar Swaminathan 	return (rval);
8081bafec742SSukumar Swaminathan }
8082bafec742SSukumar Swaminathan 
8083bafec742SSukumar Swaminathan /*
8084bafec742SSukumar Swaminathan  * _info
8085bafec742SSukumar Swaminathan  * Returns information about loadable module.
8086bafec742SSukumar Swaminathan  */
8087bafec742SSukumar Swaminathan int
_info(struct modinfo * modinfop)8088bafec742SSukumar Swaminathan _info(struct modinfo *modinfop)
8089bafec742SSukumar Swaminathan {
8090bafec742SSukumar Swaminathan 	return (mod_info(&modlinkage, modinfop));
8091bafec742SSukumar Swaminathan }
8092