1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 QLogic Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2018, Joyent, Inc.
28  */
29 
30 #include <qlge.h>
31 #include <sys/atomic.h>
32 #include <sys/strsubr.h>
33 #include <sys/pattr.h>
34 #include <netinet/in.h>
35 #include <netinet/ip.h>
36 #include <netinet/ip6.h>
37 #include <netinet/tcp.h>
38 #include <netinet/udp.h>
39 #include <inet/ip.h>
40 
41 
42 
43 /*
44  * Local variables
45  */
46 static struct ether_addr ql_ether_broadcast_addr =
47 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
48 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
49 
50 /*
51  * Local function prototypes
52  */
53 static void ql_free_resources(qlge_t *);
54 static void ql_fini_kstats(qlge_t *);
55 static uint32_t ql_get_link_state(qlge_t *);
56 static void ql_read_conf(qlge_t *);
57 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
58     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
59     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
60 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
61     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
62     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
63 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
64 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
65 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
66 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
67 static int ql_bringdown_adapter(qlge_t *);
68 static int ql_bringup_adapter(qlge_t *);
69 static int ql_asic_reset(qlge_t *);
70 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
71 static void ql_stop_timer(qlge_t *qlge);
72 static void ql_fm_fini(qlge_t *qlge);
73 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
74 
75 /*
76  * TX DMA mapping handles allow multiple scatter-gather entries
77  */
78 ddi_dma_attr_t  tx_mapping_dma_attr = {
79 	DMA_ATTR_V0,			/* dma_attr_version */
80 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
81 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
82 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
83 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
84 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
85 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
86 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
87 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
88 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
89 	QL_DMA_GRANULARITY,		/* granularity of device */
90 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
91 };
92 
93 /*
94  * Receive buffers and Request/Response queues do not allow scatter-gather lists
95  */
96 ddi_dma_attr_t  dma_attr = {
97 	DMA_ATTR_V0,			/* dma_attr_version */
98 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
99 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
100 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
101 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
102 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
103 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
104 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
105 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
106 	1,				/* s/g list length, i.e. no s/g list */
107 	QL_DMA_GRANULARITY,		/* granularity of device */
108 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
109 };
110 /*
111  * Receive buffers do not allow scatter-gather lists
112  */
113 ddi_dma_attr_t  dma_attr_rbuf = {
114 	DMA_ATTR_V0,			/* dma_attr_version */
115 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
116 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
117 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
118 	0x1,				/* DMA address alignment, byte aligned */
119 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
120 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
121 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
122 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
123 	1,				/* s/g list length, i.e. no s/g list */
124 	QL_DMA_GRANULARITY,		/* granularity of device */
125 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
126 };
127 /*
128  * DMA access attribute structure.
129  */
130 /* device register access from host */
131 ddi_device_acc_attr_t ql_dev_acc_attr = {
132 	DDI_DEVICE_ATTR_V0,
133 	DDI_STRUCTURE_LE_ACC,
134 	DDI_STRICTORDER_ACC
135 };
136 
137 /* host ring descriptors */
138 ddi_device_acc_attr_t ql_desc_acc_attr = {
139 	DDI_DEVICE_ATTR_V0,
140 	DDI_NEVERSWAP_ACC,
141 	DDI_STRICTORDER_ACC
142 };
143 
144 /* host ring buffer */
145 ddi_device_acc_attr_t ql_buf_acc_attr = {
146 	DDI_DEVICE_ATTR_V0,
147 	DDI_NEVERSWAP_ACC,
148 	DDI_STRICTORDER_ACC
149 };
150 
151 /*
152  * Hash key table for Receive Side Scaling (RSS) support
153  */
154 const uint8_t key_data[] = {
155 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
156 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
157 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
158 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
159 
160 /*
161  * Shadow Registers:
162  * Outbound queues have a consumer index that is maintained by the chip.
163  * Inbound queues have a producer index that is maintained by the chip.
164  * For lower overhead, these registers are "shadowed" to host memory
165  * which allows the device driver to track the queue progress without
166  * PCI reads. When an entry is placed on an inbound queue, the chip will
167  * update the relevant index register and then copy the value to the
168  * shadow register in host memory.
169  * Currently, ql_read_sh_reg only reads the inbound queues' producer index.
170  */
171 
172 static inline unsigned int
173 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
174 {
175 	uint32_t rtn;
176 
177 	/* re-synchronize shadow prod index dma buffer before reading */
178 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
179 	    rx_ring->prod_idx_sh_reg_offset,
180 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
181 
182 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
183 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
184 
185 	return (rtn);
186 }
187 
188 /*
189  * Read 32 bit atomically
190  */
191 uint32_t
192 ql_atomic_read_32(volatile uint32_t *target)
193 {
194 	/*
195 	 * atomic_add_32_nv returns the new value after the add,
196 	 * we are adding 0 so we should get the original value
197 	 */
198 	return (atomic_add_32_nv(target, 0));
199 }
200 
201 /*
202  * Set 32 bit atomically
203  */
204 void
205 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
206 {
207 	(void) atomic_swap_32(target, newval);
208 }
209 
210 
211 /*
212  * Setup device PCI configuration registers.
213  * Kernel context.
214  */
215 static void
216 ql_pci_config(qlge_t *qlge)
217 {
218 	uint16_t w;
219 
220 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
221 	    PCI_CONF_VENID);
222 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
223 	    PCI_CONF_DEVID);
224 
225 	/*
226 	 * we want to respect framework's setting of PCI
227 	 * configuration space command register and also
228 	 * want to make sure that all bits of interest to us
229 	 * are properly set in PCI Command register(0x04).
230 	 * PCI_COMM_IO		0x1	 I/O access enable
231 	 * PCI_COMM_MAE		0x2	 Memory access enable
232 	 * PCI_COMM_ME		0x4	 bus master enable
233 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
234 	 */
235 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
236 	w = (uint16_t)(w & (~PCI_COMM_IO));
237 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
238 	    /* PCI_COMM_MEMWR_INVAL | */
239 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
240 
241 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
242 
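	/*
	 * Offset 0x54 is assumed to be the PCIe Device Control register on
	 * this adapter; bits 14:12 select the maximum read request size,
	 * and 101b programs a 4096-byte maximum read request.
	 */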
243 	w = pci_config_get16(qlge->pci_handle, 0x54);
244 	w = (uint16_t)(w & (~0x7000));
245 	w = (uint16_t)(w | 0x5000);
246 	pci_config_put16(qlge->pci_handle, 0x54, w);
247 
248 	ql_dump_pci_config(qlge);
249 }
250 
251 /*
252  * This routine performs the necessary steps to set GLD MAC information
253  * such as the function number, xgmac mask and shift bits.
254  */
255 static int
256 ql_set_mac_info(qlge_t *qlge)
257 {
258 	uint32_t value;
259 	int rval = DDI_FAILURE;
260 	uint32_t fn0_net, fn1_net;
261 
262 	/* set default value */
263 	qlge->fn0_net = FN0_NET;
264 	qlge->fn1_net = FN1_NET;
265 
266 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
267 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
268 		    __func__, qlge->instance);
269 		goto exit;
270 	} else {
271 		fn0_net = (value >> 1) & 0x07;
272 		fn1_net = (value >> 5) & 0x07;
273 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
274 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, \n"
275 			    "nic0 function number %d,"
276 			    "nic1 function number %d "
277 			    "use default\n",
278 			    __func__, qlge->instance, value, fn0_net, fn1_net);
279 			goto exit;
280 		} else {
281 			qlge->fn0_net = fn0_net;
282 			qlge->fn1_net = fn1_net;
283 		}
284 	}
285 
286 	/* Get the function number that the driver is associated with */
287 	value = ql_read_reg(qlge, REG_STATUS);
288 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
289 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
290 	    value, qlge->func_number));
291 
292 	/* The driver is loaded on a non-NIC function? */
293 	if ((qlge->func_number != qlge->fn0_net) &&
294 	    (qlge->func_number != qlge->fn1_net)) {
295 		cmn_err(CE_WARN,
296 		    "Invalid function number = 0x%x\n", qlge->func_number);
297 		goto exit;
298 	}
299 	/* network port 0? */
300 	if (qlge->func_number == qlge->fn0_net) {
301 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
302 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
303 	} else {
304 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
305 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
306 	}
307 	rval = DDI_SUCCESS;
308 exit:
309 	return (rval);
310 
311 }
312 
313 /*
314  * write to doorbell register
315  */
316 void
317 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
318 {
319 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
320 }
321 
322 /*
323  * read from doorbell register
324  */
325 uint32_t
326 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
327 {
328 	uint32_t ret;
329 
330 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
331 
332 	return	(ret);
333 }
334 
335 /*
336  * This function waits for a specific bit to come ready
337  * in a given register.  It is used mostly during initialization,
338  * but is also used by kernel-context entry points such as
339  * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
340  */
341 static int
342 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
343 {
344 	uint32_t temp;
345 	int count = UDELAY_COUNT;
346 
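	/*
	 * Poll the register up to UDELAY_COUNT times, pausing UDELAY_DELAY
	 * between reads; give up early if err_bit is raised.
	 */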
347 	while (count) {
348 		temp = ql_read_reg(qlge, reg);
349 
350 		/* check for errors */
351 		if ((temp & err_bit) != 0) {
352 			break;
353 		} else if ((temp & bit) != 0)
354 			return (DDI_SUCCESS);
355 		qlge_delay(UDELAY_DELAY);
356 		count--;
357 	}
358 	cmn_err(CE_WARN,
359 	    "Waiting for reg %x to come ready failed.", reg);
360 	if (qlge->fm_enable) {
361 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
362 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
363 	}
364 	return (DDI_FAILURE);
365 }
366 
367 /*
368  * The CFG register is used to download TX and RX control blocks
369  * to the chip. This function waits for an operation to complete.
370  */
371 static int
372 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
373 {
374 	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
375 }
376 
377 
378 /*
379  * Used to issue init control blocks to hw. Maps control block,
380  * sets address, triggers download, waits for completion.
381  */
382 static int
383 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
384 {
385 	int status = DDI_SUCCESS;
386 	uint32_t mask;
387 	uint32_t value;
388 
389 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
390 	if (status != DDI_SUCCESS) {
391 		goto exit;
392 	}
393 	status = ql_wait_cfg(qlge, bit);
394 	if (status != DDI_SUCCESS) {
395 		goto exit;
396 	}
397 
398 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
399 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
400 
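	/*
	 * As with other control registers on this chip, the upper 16 bits
	 * act as a write mask for the lower 16, so only the selected
	 * control bit and queue id field are modified below.
	 */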
401 	mask = CFG_Q_MASK | (bit << 16);
402 	value = bit | (q_id << CFG_Q_SHIFT);
403 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
404 
405 	/*
406 	 * Wait for the bit to clear after signaling hw.
407 	 */
408 	status = ql_wait_cfg(qlge, bit);
409 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
410 
411 exit:
412 	return (status);
413 }
414 
415 /*
416  * Initialize adapter instance
417  */
418 static int
419 ql_init_instance(qlge_t *qlge)
420 {
421 	int i;
422 
423 	/* Default value */
424 	qlge->mac_flags = QL_MAC_INIT;
425 	qlge->mtu = ETHERMTU;		/* set normal size as default */
426 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
427 
428 	for (i = 0; i < MAX_RX_RINGS; i++) {
429 		qlge->rx_polls[i] = 0;
430 		qlge->rx_interrupts[i] = 0;
431 	}
432 
433 	/*
434 	 * Set up the operating parameters.
435 	 */
436 	qlge->multicast_list_count = 0;
437 
438 	/*
439 	 * Set up the max number of unicast list
440 	 */
441 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
442 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
443 
444 	/*
445 	 * read user defined properties in .conf file
446 	 */
447 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
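	/*
	 * The completion ring array holds one outbound completion ring per
	 * TX ring plus the inbound RSS rings, hence the sum below.
	 */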
448 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
449 
450 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
451 
452 	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
453 	ql_pci_config(qlge);
454 	qlge->ip_hdr_offset = 0;
455 
456 	if (qlge->device_id == 0x8000) {
457 		/* Schultz card */
458 		qlge->cfg_flags |= CFG_CHIP_8100;
459 		/* enable just ipv4 chksum offload for Schultz */
460 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
461 		/*
462 		 * Schultz firmware does not do pseudo IP header checksum
463 		 * calculation; it needs to be done by the driver
464 		 */
465 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
466 		if (qlge->lso_enable)
467 			qlge->cfg_flags |= CFG_LSO;
468 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
469 		/* Schultz must split packet header */
470 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
471 		qlge->max_read_mbx = 5;
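		/*
		 * The 2-byte pad is assumed to keep the IP header 32-bit
		 * aligned behind the 14-byte Ethernet header.
		 */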
472 		qlge->ip_hdr_offset = 2;
473 	}
474 
475 	/* Set Function Number and some of the iocb mac information */
476 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
477 		return (DDI_FAILURE);
478 
479 	/* Read network settings from NVRAM */
480 	/* After nvram is read successfully, update dev_addr */
481 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
482 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
483 		for (i = 0; i < ETHERADDRL; i++) {
484 			qlge->dev_addr.ether_addr_octet[i] =
485 			    qlge->nic_config.factory_MAC[i];
486 		}
487 	} else {
488 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
489 		    __func__, qlge->instance);
490 		return (DDI_FAILURE);
491 	}
492 
493 	bcopy(qlge->dev_addr.ether_addr_octet,
494 	    qlge->unicst_addr[0].addr.ether_addr_octet,
495 	    ETHERADDRL);
496 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
497 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
498 
499 	qlge->port_link_state = LS_DOWN;
500 
501 	return (DDI_SUCCESS);
502 }
503 
504 
505 /*
506  * This hardware semaphore provides the mechanism for exclusive access to
507  * resources shared between the NIC driver, MPI firmware,
508  * FCOE firmware and the FC driver.
509  */
510 static int
511 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
512 {
513 	uint32_t sem_bits = 0;
514 
515 	switch (sem_mask) {
516 	case SEM_XGMAC0_MASK:
517 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
518 		break;
519 	case SEM_XGMAC1_MASK:
520 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
521 		break;
522 	case SEM_ICB_MASK:
523 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
524 		break;
525 	case SEM_MAC_ADDR_MASK:
526 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
527 		break;
528 	case SEM_FLASH_MASK:
529 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
530 		break;
531 	case SEM_PROBE_MASK:
532 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
533 		break;
534 	case SEM_RT_IDX_MASK:
535 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
536 		break;
537 	case SEM_PROC_REG_MASK:
538 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
539 		break;
540 	default:
541 		cmn_err(CE_WARN, "Bad Semaphore mask!");
542 		return (DDI_FAILURE);
543 	}
544 
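	/*
	 * Write the requested semaphore bits and read them back; a set bit
	 * means the semaphore was granted, so zero is returned on success
	 * and non-zero when the semaphore could not be acquired.
	 */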
545 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
546 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
547 }
548 
549 /*
550  * Lock a specific bit of Semaphore register to gain
551  * access to a particular shared register
552  */
553 int
554 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
555 {
556 	unsigned int wait_count = 30;
557 
558 	while (wait_count) {
559 		if (!ql_sem_trylock(qlge, sem_mask))
560 			return (DDI_SUCCESS);
561 		qlge_delay(100);
562 		wait_count--;
563 	}
564 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
565 	    __func__, qlge->instance, sem_mask);
566 	return (DDI_FAILURE);
567 }
568 
569 /*
570  * Unlock a specific bit of the Semaphore register to release
571  * access to a particular shared register
572  */
573 void
574 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
575 {
576 	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
577 	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
578 }
579 
580 /*
581  * Get property value from configuration file.
582  *
583  * string = property string pointer.
584  *
585  * Returns:
586  * 0xFFFFFFFF = no property else property value.
587  */
588 static uint32_t
589 ql_get_prop(qlge_t *qlge, char *string)
590 {
591 	char buf[256];
592 	uint32_t data;
593 
594 	/* Get adapter instance parameter. */
595 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
596 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
597 	    (int)0xffffffff);
598 
599 	/* Adapter instance parameter found? */
600 	if (data == 0xffffffff) {
601 		/* No, get default parameter. */
602 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
603 		    string, (int)0xffffffff);
604 	}
605 
606 	return (data);
607 }
608 
609 /*
610  * Read user setting from configuration file.
611  */
612 static void
613 ql_read_conf(qlge_t *qlge)
614 {
615 	uint32_t data;
616 
617 	/* clear configuration flags */
618 	qlge->cfg_flags = 0;
619 
620 	/* Set up the default ring sizes. */
621 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
622 	data = ql_get_prop(qlge, "tx_ring_size");
623 	/* if data is valid */
624 	if ((data != 0xffffffff) && data) {
625 		if (qlge->tx_ring_size != data) {
626 			qlge->tx_ring_size = (uint16_t)data;
627 		}
628 	}
629 
630 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
631 	data = ql_get_prop(qlge, "rx_ring_size");
632 	/* if data is valid */
633 	if ((data != 0xffffffff) && data) {
634 		if (qlge->rx_ring_size != data) {
635 			qlge->rx_ring_size = (uint16_t)data;
636 		}
637 	}
638 
639 	qlge->tx_ring_count = 8;
640 	data = ql_get_prop(qlge, "tx_ring_count");
641 	/* if data is valid */
642 	if ((data != 0xffffffff) && data) {
643 		if (qlge->tx_ring_count != data) {
644 			qlge->tx_ring_count = (uint16_t)data;
645 		}
646 	}
647 
648 	qlge->rss_ring_count = 8;
649 	data = ql_get_prop(qlge, "rss_ring_count");
650 	/* if data is valid */
651 	if ((data != 0xffffffff) && data) {
652 		if (qlge->rss_ring_count != data) {
653 			qlge->rss_ring_count = (uint16_t)data;
654 		}
655 	}
656 
657 	/* Get default rx_copy enable/disable. */
658 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
659 	    data == 0) {
660 		qlge->rx_copy = B_FALSE;
661 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
662 	} else if (data == 1) {
663 		qlge->rx_copy = B_TRUE;
664 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
665 	}
666 
667 	qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
668 	data = ql_get_prop(qlge, "rx_copy_threshold");
669 	if ((data != 0xffffffff) && (data != 0)) {
670 		qlge->rx_copy_threshold = data;
671 		cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
672 		    qlge->rx_copy_threshold);
673 	}
674 
675 	/* Get mtu packet size. */
676 	data = ql_get_prop(qlge, "mtu");
677 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
678 		if (qlge->mtu != data) {
679 			qlge->mtu = data;
680 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
681 		}
682 	}
683 
684 	if (qlge->mtu == JUMBO_MTU) {
685 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
686 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
687 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
688 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
689 	}
690 
691 
692 	/* Get pause mode, default is Per Priority mode. */
693 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
694 	data = ql_get_prop(qlge, "pause");
695 	if (data <= PAUSE_MODE_PER_PRIORITY) {
696 		if (qlge->pause != data) {
697 			qlge->pause = data;
698 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
699 		}
700 	}
701 	/* Receive interrupt delay */
702 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
703 	data = ql_get_prop(qlge, "rx_intr_delay");
704 	/* if data is valid */
705 	if ((data != 0xffffffff) && data) {
706 		if (qlge->rx_coalesce_usecs != data) {
707 			qlge->rx_coalesce_usecs = (uint16_t)data;
708 		}
709 	}
710 	/* Rx inter-packet delay. */
711 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
712 	data = ql_get_prop(qlge, "rx_ipkt_delay");
713 	/* if data is valid */
714 	if ((data != 0xffffffff) && data) {
715 		if (qlge->rx_max_coalesced_frames != data) {
716 			qlge->rx_max_coalesced_frames = (uint16_t)data;
717 		}
718 	}
719 	/* Transmit interrupt delay */
720 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
721 	data = ql_get_prop(qlge, "tx_intr_delay");
722 	/* if data is valid */
723 	if ((data != 0xffffffff) && data) {
724 		if (qlge->tx_coalesce_usecs != data) {
725 			qlge->tx_coalesce_usecs = (uint16_t)data;
726 		}
727 	}
728 	/* Tx inter-packet delay. */
729 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
730 	data = ql_get_prop(qlge, "tx_ipkt_delay");
731 	/* if data is valid */
732 	if ((data != 0xffffffff) && data) {
733 		if (qlge->tx_max_coalesced_frames != data) {
734 			qlge->tx_max_coalesced_frames = (uint16_t)data;
735 		}
736 	}
737 
738 	/* Get split header payload_copy_thresh. */
739 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
740 	data = ql_get_prop(qlge, "payload_copy_thresh");
741 	/* if data is valid */
742 	if ((data != 0xffffffff) && (data != 0)) {
743 		if (qlge->payload_copy_thresh != data) {
744 			qlge->payload_copy_thresh = data;
745 		}
746 	}
747 
748 	/* large send offload (LSO) capability. */
749 	qlge->lso_enable = 1;
750 	data = ql_get_prop(qlge, "lso_enable");
751 	/* if data is valid */
752 	if ((data == 0) || (data == 1)) {
753 		if (qlge->lso_enable != data) {
754 			qlge->lso_enable = (uint16_t)data;
755 		}
756 	}
757 
758 	/* dcbx capability. */
759 	qlge->dcbx_enable = 1;
760 	data = ql_get_prop(qlge, "dcbx_enable");
761 	/* if data is valid */
762 	if ((data == 0) || (data == 1)) {
763 		if (qlge->dcbx_enable != data) {
764 			qlge->dcbx_enable = (uint16_t)data;
765 		}
766 	}
767 	/* fault management enable */
768 	qlge->fm_enable = B_TRUE;
769 	data = ql_get_prop(qlge, "fm-enable");
770 	if ((data == 0x1) || (data == 0)) {
771 		qlge->fm_enable = (boolean_t)data;
772 	}
773 
774 }
775 
776 /*
777  * Enable global interrupt
778  */
779 static void
780 ql_enable_global_interrupt(qlge_t *qlge)
781 {
782 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
783 	    (INTR_EN_EI << 16) | INTR_EN_EI);
784 	qlge->flags |= INTERRUPTS_ENABLED;
785 }
786 
787 /*
788  * Disable global interrupt
789  */
790 static void
791 ql_disable_global_interrupt(qlge_t *qlge)
792 {
793 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
794 	qlge->flags &= ~INTERRUPTS_ENABLED;
795 }
796 
797 /*
798  * Enable one ring interrupt
799  */
800 void
801 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
802 {
803 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
804 
805 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
806 	    __func__, qlge->instance, intr, ctx->irq_cnt));
807 
808 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
809 		/*
810 		 * Always enable if we're MSIX multi interrupts and
811 		 * it's not the default (zeroth) interrupt.
812 		 */
813 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
814 		return;
815 	}
816 
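	/*
	 * irq_cnt implements nested disable/enable: disable increments the
	 * count, enable decrements it, and the enable mask is written to
	 * the hardware only when the count drops back to zero.
	 */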
817 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
818 		mutex_enter(&qlge->hw_mutex);
819 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
820 		mutex_exit(&qlge->hw_mutex);
821 		QL_PRINT(DBG_INTR,
822 		    ("%s(%d): write %x to intr enable register \n",
823 		    __func__, qlge->instance, ctx->intr_en_mask));
824 	}
825 }
826 
827 /*
828  * ql_forced_disable_completion_interrupt
829  * Called from the OS; it may be invoked without
830  * a pending interrupt, so force the disable.
831  */
832 uint32_t
833 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
834 {
835 	uint32_t var = 0;
836 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
837 
838 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
839 	    __func__, qlge->instance, intr, ctx->irq_cnt));
840 
841 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
842 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
843 		var = ql_read_reg(qlge, REG_STATUS);
844 		return (var);
845 	}
846 
847 	mutex_enter(&qlge->hw_mutex);
848 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
849 	var = ql_read_reg(qlge, REG_STATUS);
850 	mutex_exit(&qlge->hw_mutex);
851 
852 	return (var);
853 }
854 
855 /*
856  * Disable a completion interrupt
857  */
858 void
859 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
860 {
861 	struct intr_ctx *ctx;
862 
863 	ctx = qlge->intr_ctx + intr;
864 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
865 	    __func__, qlge->instance, intr, ctx->irq_cnt));
866 	/*
867 	 * HW disables for us if we're MSIX multi interrupts and
868 	 * it's not the default (zeroth) interrupt.
869 	 */
870 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
871 		return;
872 
873 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
874 		mutex_enter(&qlge->hw_mutex);
875 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
876 		mutex_exit(&qlge->hw_mutex);
877 	}
878 	atomic_inc_32(&ctx->irq_cnt);
879 }
880 
881 /*
882  * Enable all completion interrupts
883  */
884 static void
885 ql_enable_all_completion_interrupts(qlge_t *qlge)
886 {
887 	int i;
888 	uint32_t value = 1;
889 
890 	for (i = 0; i < qlge->intr_cnt; i++) {
891 		/*
892 		 * Set the count to 1 for Legacy / MSI interrupts or for the
893 		 * default interrupt (0)
894 		 */
895 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
896 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
897 		}
898 		ql_enable_completion_interrupt(qlge, i);
899 	}
900 }
901 
902 /*
903  * Disable all completion interrupts
904  */
905 static void
906 ql_disable_all_completion_interrupts(qlge_t *qlge)
907 {
908 	int i;
909 	uint32_t value = 0;
910 
911 	for (i = 0; i < qlge->intr_cnt; i++) {
912 
913 		/*
914 		 * Set the count to 0 for Legacy / MSI interrupts or for the
915 		 * default interrupt (0)
916 		 */
917 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
918 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
919 
920 		ql_disable_completion_interrupt(qlge, i);
921 	}
922 }
923 
924 /*
925  * Update small buffer queue producer index
926  */
927 static void
928 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
929 {
930 	/* Update the buffer producer index */
931 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
932 	    rx_ring->sbq_prod_idx));
933 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
934 	    rx_ring->sbq_prod_idx);
935 }
936 
937 /*
938  * Update large buffer queue producer index
939  */
940 static void
941 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
942 {
943 	/* Update the buffer producer index */
944 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
945 	    rx_ring->lbq_prod_idx));
946 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
947 	    rx_ring->lbq_prod_idx);
948 }
949 
950 /*
951  * Adds a small buffer descriptor to end of its in use list,
952  * assumes sbq_lock is already taken
953  */
954 static void
955 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
956     struct bq_desc *sbq_desc)
957 {
958 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
959 
960 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
961 	inuse_idx++;
962 	if (inuse_idx >= rx_ring->sbq_len)
963 		inuse_idx = 0;
964 	rx_ring->sbq_use_tail = inuse_idx;
965 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
966 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
967 }
968 
969 /*
970  * Get a small buffer descriptor from its in use list
971  */
972 static struct bq_desc *
973 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
974 {
975 	struct bq_desc *sbq_desc = NULL;
976 	uint32_t inuse_idx;
977 
978 	/* Pick from head of in use list */
979 	inuse_idx = rx_ring->sbq_use_head;
980 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
981 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
982 
983 	if (sbq_desc != NULL) {
984 		inuse_idx++;
985 		if (inuse_idx >= rx_ring->sbq_len)
986 			inuse_idx = 0;
987 		rx_ring->sbq_use_head = inuse_idx;
988 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
989 		atomic_inc_32(&rx_ring->rx_indicate);
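		/*
		 * Mark the buffer as loaned to the upper layer until the
		 * rx_recycle callback returns it to the free list.
		 */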
990 		sbq_desc->upl_inuse = 1;
991 		/* if mp is NULL */
992 		if (sbq_desc->mp == NULL) {
993 			/* try to remap mp again */
994 			sbq_desc->mp =
995 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
996 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
997 		}
998 	}
999 
1000 	return (sbq_desc);
1001 }
1002 
1003 /*
1004  * Add a small buffer descriptor to its free list
1005  */
1006 static void
1007 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1008     struct bq_desc *sbq_desc)
1009 {
1010 	uint32_t free_idx;
1011 
1012 	/* Add to the end of free list */
1013 	free_idx = rx_ring->sbq_free_tail;
1014 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1015 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1016 	free_idx++;
1017 	if (free_idx >= rx_ring->sbq_len)
1018 		free_idx = 0;
1019 	rx_ring->sbq_free_tail = free_idx;
1020 	atomic_inc_32(&rx_ring->sbuf_free_count);
1021 }
1022 
1023 /*
1024  * Get a small buffer descriptor from its free list
1025  */
1026 static struct bq_desc *
1027 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1028 {
1029 	struct bq_desc *sbq_desc;
1030 	uint32_t free_idx;
1031 
1032 	free_idx = rx_ring->sbq_free_head;
1033 	/* Pick from top of free list */
1034 	sbq_desc = rx_ring->sbuf_free[free_idx];
1035 	rx_ring->sbuf_free[free_idx] = NULL;
1036 	if (sbq_desc != NULL) {
1037 		free_idx++;
1038 		if (free_idx >= rx_ring->sbq_len)
1039 			free_idx = 0;
1040 		rx_ring->sbq_free_head = free_idx;
1041 		atomic_dec_32(&rx_ring->sbuf_free_count);
1042 	}
1043 	return (sbq_desc);
1044 }
1045 
1046 /*
1047  * Add a large buffer descriptor to its in use list
1048  */
1049 static void
1050 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1051     struct bq_desc *lbq_desc)
1052 {
1053 	uint32_t inuse_idx;
1054 
1055 	inuse_idx = rx_ring->lbq_use_tail;
1056 
1057 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1058 	inuse_idx++;
1059 	if (inuse_idx >= rx_ring->lbq_len)
1060 		inuse_idx = 0;
1061 	rx_ring->lbq_use_tail = inuse_idx;
1062 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1063 }
1064 
1065 /*
1066  * Get a large buffer descriptor from in use list
1067  */
1068 static struct bq_desc *
1069 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1070 {
1071 	struct bq_desc *lbq_desc;
1072 	uint32_t inuse_idx;
1073 
1074 	/* Pick from head of in use list */
1075 	inuse_idx = rx_ring->lbq_use_head;
1076 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1077 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1078 
1079 	if (lbq_desc != NULL) {
1080 		inuse_idx++;
1081 		if (inuse_idx >= rx_ring->lbq_len)
1082 			inuse_idx = 0;
1083 		rx_ring->lbq_use_head = inuse_idx;
1084 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1085 		atomic_inc_32(&rx_ring->rx_indicate);
1086 		lbq_desc->upl_inuse = 1;
1087 
1088 		/* if mp is NULL */
1089 		if (lbq_desc->mp == NULL) {
1090 			/* try to remap mp again */
1091 			lbq_desc->mp =
1092 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1093 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1094 		}
1095 	}
1096 	return (lbq_desc);
1097 }
1098 
1099 /*
1100  * Add a large buffer descriptor to free list
1101  */
1102 static void
1103 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1104     struct bq_desc *lbq_desc)
1105 {
1106 	uint32_t free_idx;
1107 
1108 	/* Add to the end of free list */
1109 	free_idx = rx_ring->lbq_free_tail;
1110 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1111 	free_idx++;
1112 	if (free_idx >= rx_ring->lbq_len)
1113 		free_idx = 0;
1114 	rx_ring->lbq_free_tail = free_idx;
1115 	atomic_inc_32(&rx_ring->lbuf_free_count);
1116 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1117 }
1118 
1119 /*
1120  * Get a large buffer descriptor from its free list
1121  */
1122 static struct bq_desc *
1123 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1124 {
1125 	struct bq_desc *lbq_desc;
1126 	uint32_t free_idx;
1127 
1128 	free_idx = rx_ring->lbq_free_head;
1129 	/* Pick from head of free list */
1130 	lbq_desc = rx_ring->lbuf_free[free_idx];
1131 	rx_ring->lbuf_free[free_idx] = NULL;
1132 
1133 	if (lbq_desc != NULL) {
1134 		free_idx++;
1135 		if (free_idx >= rx_ring->lbq_len)
1136 			free_idx = 0;
1137 		rx_ring->lbq_free_head = free_idx;
1138 		atomic_dec_32(&rx_ring->lbuf_free_count);
1139 	}
1140 	return (lbq_desc);
1141 }
1142 
1143 /*
1144  * Add a small buffer descriptor to free list
1145  */
1146 static void
1147 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1148 {
1149 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1150 	uint64_t *sbq_entry;
1151 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1152 	/*
1153 	 * Sync access
1154 	 */
1155 	mutex_enter(&rx_ring->sbq_lock);
1156 
1157 	sbq_desc->upl_inuse = 0;
1158 
1159 	/*
1160 	 * If we are freeing the buffers as a result of adapter unload, get out
1161 	 */
1162 	if ((sbq_desc->free_buf != 0) ||
1163 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1164 		if (sbq_desc->free_buf == 0)
1165 			atomic_dec_32(&rx_ring->rx_indicate);
1166 		mutex_exit(&rx_ring->sbq_lock);
1167 		return;
1168 	}
1169 #ifdef QLGE_LOAD_UNLOAD
1170 	if (rx_ring->rx_indicate == 0)
1171 		cmn_err(CE_WARN, "sbq: indicate wrong");
1172 #endif
1173 #ifdef QLGE_TRACK_BUFFER_USAGE
1174 	uint32_t sb_consumer_idx;
1175 	uint32_t sb_producer_idx;
1176 	uint32_t num_free_buffers;
1177 	uint32_t temp;
1178 
1179 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1180 	sb_producer_idx = temp & 0x0000ffff;
1181 	sb_consumer_idx = (temp >> 16);
1182 
1183 	if (sb_consumer_idx > sb_producer_idx)
1184 		num_free_buffers = NUM_SMALL_BUFFERS -
1185 		    (sb_consumer_idx - sb_producer_idx);
1186 	else
1187 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1188 
1189 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1190 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1191 
1192 #endif
1193 
1194 #ifdef QLGE_LOAD_UNLOAD
1195 	if (rx_ring->rx_indicate > 0xFF000000)
1196 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1197 		    " sbq_desc index %d.",
1198 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1199 		    sbq_desc->index);
1200 #endif
1201 	if (alloc_memory) {
1202 		sbq_desc->mp =
1203 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1204 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1205 		if (sbq_desc->mp == NULL) {
1206 			rx_ring->rx_failed_sbq_allocs++;
1207 		}
1208 	}
1209 
1210 	/* Got the packet back from the stack, decrement rx_indicate count */
1211 	atomic_dec_32(&rx_ring->rx_indicate);
1212 
1213 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1214 
1215 	/* Rearm if possible */
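	/*
	 * Re-arm the hardware only while the MAC is started and only once a
	 * batch of buffers (MIN_BUFFERS_FREE_COUNT) has accumulated, so the
	 * producer index doorbell is written once per batch.
	 */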
1216 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1217 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1218 		sbq_entry = rx_ring->sbq_dma.vaddr;
1219 		sbq_entry += rx_ring->sbq_prod_idx;
1220 
1221 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1222 			/* Get first one from free list */
1223 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1224 
1225 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1226 			sbq_entry++;
1227 			rx_ring->sbq_prod_idx++;
1228 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1229 				rx_ring->sbq_prod_idx = 0;
1230 				sbq_entry = rx_ring->sbq_dma.vaddr;
1231 			}
1232 			/* Add to end of in use list */
1233 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1234 		}
1235 
1236 		/* Update small buffer queue producer index */
1237 		ql_update_sbq_prod_idx(qlge, rx_ring);
1238 	}
1239 
1240 	mutex_exit(&rx_ring->sbq_lock);
1241 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1242 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1243 }
1244 
1245 /*
1246  * rx recycle call back function
1247  */
1248 static void
1249 ql_release_to_sbuf_free_list(caddr_t p)
1250 {
1251 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1252 
1253 	if (sbq_desc == NULL)
1254 		return;
1255 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1256 }
1257 
1258 /*
1259  * Add a large buffer descriptor to free list
1260  */
1261 static void
1262 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1263 {
1264 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1265 	uint64_t *lbq_entry;
1266 	qlge_t *qlge = rx_ring->qlge;
1267 
1268 	/* Sync access */
1269 	mutex_enter(&rx_ring->lbq_lock);
1270 
1271 	lbq_desc->upl_inuse = 0;
1272 	/*
1273 	 * If we are freeing the buffers as a result of adapter unload, get out
1274 	 */
1275 	if ((lbq_desc->free_buf != 0) ||
1276 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1277 		if (lbq_desc->free_buf == 0)
1278 			atomic_dec_32(&rx_ring->rx_indicate);
1279 		mutex_exit(&rx_ring->lbq_lock);
1280 		return;
1281 	}
1282 #ifdef QLGE_LOAD_UNLOAD
1283 	if (rx_ring->rx_indicate == 0)
1284 		cmn_err(CE_WARN, "lbq: indicate wrong");
1285 #endif
1286 #ifdef QLGE_TRACK_BUFFER_USAGE
1287 	uint32_t lb_consumer_idx;
1288 	uint32_t lb_producer_idx;
1289 	uint32_t num_free_buffers;
1290 	uint32_t temp;
1291 
1292 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1293 
1294 	lb_producer_idx = temp & 0x0000ffff;
1295 	lb_consumer_idx = (temp >> 16);
1296 
1297 	if (lb_consumer_idx > lb_producer_idx)
1298 		num_free_buffers = NUM_LARGE_BUFFERS -
1299 		    (lb_consumer_idx - lb_producer_idx);
1300 	else
1301 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1302 
1303 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1304 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1305 	}
1306 #endif
1307 
1308 #ifdef QLGE_LOAD_UNLOAD
1309 	if (rx_ring->rx_indicate > 0xFF000000)
1310 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1311 		    "lbq_desc index %d",
1312 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1313 		    lbq_desc->index);
1314 #endif
1315 	if (alloc_memory) {
1316 		lbq_desc->mp =
1317 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1318 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1319 		if (lbq_desc->mp == NULL) {
1320 			rx_ring->rx_failed_lbq_allocs++;
1321 		}
1322 	}
1323 
1324 	/* Got the packet back from the stack, decrement rx_indicate count */
1325 	atomic_dec_32(&rx_ring->rx_indicate);
1326 
1327 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1328 
1329 	/* Rearm if possible */
1330 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1331 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1332 		lbq_entry = rx_ring->lbq_dma.vaddr;
1333 		lbq_entry += rx_ring->lbq_prod_idx;
1334 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1335 			/* Get first one from free list */
1336 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1337 
1338 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1339 			lbq_entry++;
1340 			rx_ring->lbq_prod_idx++;
1341 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1342 				rx_ring->lbq_prod_idx = 0;
1343 				lbq_entry = rx_ring->lbq_dma.vaddr;
1344 			}
1345 
1346 			/* Add to end of in use list */
1347 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1348 		}
1349 
1350 		/* Update large buffer queue producer index */
1351 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1352 	}
1353 
1354 	mutex_exit(&rx_ring->lbq_lock);
1355 	QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1356 	    __func__, rx_ring->lbuf_free_count));
1357 }
1358 /*
1359  * rx recycle call back function
1360  */
1361 static void
1362 ql_release_to_lbuf_free_list(caddr_t p)
1363 {
1364 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1365 
1366 	if (lbq_desc == NULL)
1367 		return;
1368 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1369 }
1370 
1371 /*
1372  * free small buffer queue buffers
1373  */
1374 static void
1375 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1376 {
1377 	struct bq_desc *sbq_desc;
1378 	uint32_t i;
1379 	uint32_t j = rx_ring->sbq_free_head;
1380 	int  force_cnt = 0;
1381 
1382 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1383 		sbq_desc = rx_ring->sbuf_free[j];
1384 		sbq_desc->free_buf = 1;
1385 		j++;
1386 		if (j >= rx_ring->sbq_len) {
1387 			j = 0;
1388 		}
1389 		if (sbq_desc->mp != NULL) {
1390 			freemsg(sbq_desc->mp);
1391 			sbq_desc->mp = NULL;
1392 		}
1393 	}
1394 	rx_ring->sbuf_free_count = 0;
1395 
1396 	j = rx_ring->sbq_use_head;
1397 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1398 		sbq_desc = rx_ring->sbuf_in_use[j];
1399 		sbq_desc->free_buf = 1;
1400 		j++;
1401 		if (j >= rx_ring->sbq_len) {
1402 			j = 0;
1403 		}
1404 		if (sbq_desc->mp != NULL) {
1405 			freemsg(sbq_desc->mp);
1406 			sbq_desc->mp = NULL;
1407 		}
1408 	}
1409 	rx_ring->sbuf_in_use_count = 0;
1410 
1411 	sbq_desc = &rx_ring->sbq_desc[0];
1412 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1413 		/*
1414 		 * Set flag so that the callback does not allocate a new buffer
1415 		 */
1416 		sbq_desc->free_buf = 1;
1417 		if (sbq_desc->upl_inuse != 0) {
1418 			force_cnt++;
1419 		}
1420 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1421 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1422 			    &sbq_desc->bd_dma.acc_handle);
1423 			sbq_desc->bd_dma.dma_handle = NULL;
1424 			sbq_desc->bd_dma.acc_handle = NULL;
1425 		}
1426 	}
1427 #ifdef QLGE_LOAD_UNLOAD
1428 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1429 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1430 #endif
1431 	if (rx_ring->sbuf_in_use != NULL) {
1432 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1433 		    sizeof (struct bq_desc *)));
1434 		rx_ring->sbuf_in_use = NULL;
1435 	}
1436 
1437 	if (rx_ring->sbuf_free != NULL) {
1438 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1439 		    sizeof (struct bq_desc *)));
1440 		rx_ring->sbuf_free = NULL;
1441 	}
1442 }
1443 
1444 /* Allocate small buffers */
1445 static int
1446 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1447 {
1448 	struct bq_desc *sbq_desc;
1449 	int i;
1450 	ddi_dma_cookie_t dma_cookie;
1451 
1452 	rx_ring->sbq_use_head = 0;
1453 	rx_ring->sbq_use_tail = 0;
1454 	rx_ring->sbuf_in_use_count = 0;
1455 	rx_ring->sbq_free_head = 0;
1456 	rx_ring->sbq_free_tail = 0;
1457 	rx_ring->sbuf_free_count = 0;
1458 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1459 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1460 	if (rx_ring->sbuf_free == NULL) {
1461 		cmn_err(CE_WARN,
1462 		    "!%s: sbuf_free_list alloc: failed",
1463 		    __func__);
1464 		goto alloc_sbuf_err;
1465 	}
1466 
1467 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1468 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1469 	if (rx_ring->sbuf_in_use == NULL) {
1470 		cmn_err(CE_WARN,
1471 		    "!%s: sbuf_inuse_list alloc: failed",
1472 		    __func__);
1473 		goto alloc_sbuf_err;
1474 	}
1475 
1476 	sbq_desc = &rx_ring->sbq_desc[0];
1477 
1478 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1479 		/* Allocate buffer */
1480 		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1481 		    &ql_buf_acc_attr,
1482 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1483 		    &sbq_desc->bd_dma.acc_handle,
1484 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1485 		    (size_t)0,				/* default alignment */
1486 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1487 		    &dma_cookie) != 0) {
1488 			cmn_err(CE_WARN,
1489 			    "!%s: ql_alloc_phys_rbuf: failed",
1490 			    __func__);
1491 			goto alloc_sbuf_err;
1492 		}
1493 
1494 		/* Set context for Return buffer callback */
1495 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1496 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1497 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1498 		sbq_desc->rx_ring = rx_ring;
1499 		sbq_desc->upl_inuse = 0;
1500 		sbq_desc->free_buf = 0;
1501 
1502 		sbq_desc->mp =
1503 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1504 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1505 		if (sbq_desc->mp == NULL) {
1506 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1507 			goto alloc_sbuf_err;
1508 		}
1509 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1510 	}
1511 
1512 	return (DDI_SUCCESS);
1513 
1514 alloc_sbuf_err:
1515 	ql_free_sbq_buffers(rx_ring);
1516 	return (DDI_FAILURE);
1517 }
1518 
1519 static void
1520 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1521 {
1522 	struct bq_desc *lbq_desc;
1523 	uint32_t i, j;
1524 	int force_cnt = 0;
1525 
1526 	j = rx_ring->lbq_free_head;
1527 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1528 		lbq_desc = rx_ring->lbuf_free[j];
1529 		lbq_desc->free_buf = 1;
1530 		j++;
1531 		if (j >= rx_ring->lbq_len)
1532 			j = 0;
1533 		if (lbq_desc->mp != NULL) {
1534 			freemsg(lbq_desc->mp);
1535 			lbq_desc->mp = NULL;
1536 		}
1537 	}
1538 	rx_ring->lbuf_free_count = 0;
1539 
1540 	j = rx_ring->lbq_use_head;
1541 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1542 		lbq_desc = rx_ring->lbuf_in_use[j];
1543 		lbq_desc->free_buf = 1;
1544 		j++;
1545 		if (j >= rx_ring->lbq_len) {
1546 			j = 0;
1547 		}
1548 		if (lbq_desc->mp != NULL) {
1549 			freemsg(lbq_desc->mp);
1550 			lbq_desc->mp = NULL;
1551 		}
1552 	}
1553 	rx_ring->lbuf_in_use_count = 0;
1554 
1555 	lbq_desc = &rx_ring->lbq_desc[0];
1556 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1557 		/* Set flag so that callback will not allocate a new buffer */
1558 		lbq_desc->free_buf = 1;
1559 		if (lbq_desc->upl_inuse != 0) {
1560 			force_cnt++;
1561 		}
1562 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1563 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1564 			    &lbq_desc->bd_dma.acc_handle);
1565 			lbq_desc->bd_dma.dma_handle = NULL;
1566 			lbq_desc->bd_dma.acc_handle = NULL;
1567 		}
1568 	}
1569 #ifdef QLGE_LOAD_UNLOAD
1570 	if (force_cnt) {
1571 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1572 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1573 		    force_cnt);
1574 	}
1575 #endif
1576 	if (rx_ring->lbuf_in_use != NULL) {
1577 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1578 		    sizeof (struct bq_desc *)));
1579 		rx_ring->lbuf_in_use = NULL;
1580 	}
1581 
1582 	if (rx_ring->lbuf_free != NULL) {
1583 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1584 		    sizeof (struct bq_desc *)));
1585 		rx_ring->lbuf_free = NULL;
1586 	}
1587 }
1588 
1589 /* Allocate large buffers */
1590 static int
1591 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1592 {
1593 	struct bq_desc *lbq_desc;
1594 	ddi_dma_cookie_t dma_cookie;
1595 	int i;
1596 	uint32_t lbq_buf_size;
1597 
1598 	rx_ring->lbq_use_head = 0;
1599 	rx_ring->lbq_use_tail = 0;
1600 	rx_ring->lbuf_in_use_count = 0;
1601 	rx_ring->lbq_free_head = 0;
1602 	rx_ring->lbq_free_tail = 0;
1603 	rx_ring->lbuf_free_count = 0;
1604 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1605 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1606 	if (rx_ring->lbuf_free == NULL) {
1607 		cmn_err(CE_WARN,
1608 		    "!%s: lbuf_free_list alloc: failed",
1609 		    __func__);
1610 		goto alloc_lbuf_err;
1611 	}
1612 
1613 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1614 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1615 
1616 	if (rx_ring->lbuf_in_use == NULL) {
1617 		cmn_err(CE_WARN,
1618 		    "!%s: lbuf_inuse_list alloc: failed",
1619 		    __func__);
1620 		goto alloc_lbuf_err;
1621 	}
1622 
1623 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1624 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1625 
1626 	lbq_desc = &rx_ring->lbq_desc[0];
1627 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1628 		rx_ring->lbq_buf_size = lbq_buf_size;
1629 		/* Allocate buffer */
1630 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1631 		    &ql_buf_acc_attr,
1632 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1633 		    &lbq_desc->bd_dma.acc_handle,
1634 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1635 		    (size_t)0, /* default alignment */
1636 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1637 		    &dma_cookie) != 0) {
1638 			cmn_err(CE_WARN,
1639 			    "!%s: ql_alloc_phys_rbuf: failed",
1640 			    __func__);
1641 			goto alloc_lbuf_err;
1642 		}
1643 
1644 		/* Set context for Return buffer callback */
1645 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1646 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1647 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1648 		lbq_desc->rx_ring = rx_ring;
1649 		lbq_desc->upl_inuse = 0;
1650 		lbq_desc->free_buf = 0;
1651 
1652 		lbq_desc->mp =
1653 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1654 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1655 		if (lbq_desc->mp == NULL) {
1656 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1657 			goto alloc_lbuf_err;
1658 		}
1659 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1660 	} /* For all large buffers */
1661 
1662 	return (DDI_SUCCESS);
1663 
1664 alloc_lbuf_err:
1665 	ql_free_lbq_buffers(rx_ring);
1666 	return (DDI_FAILURE);
1667 }
1668 
1669 /*
1670  * Free rx buffers
1671  */
1672 static void
1673 ql_free_rx_buffers(qlge_t *qlge)
1674 {
1675 	int i;
1676 	struct rx_ring *rx_ring;
1677 
1678 	for (i = 0; i < qlge->rx_ring_count; i++) {
1679 		rx_ring = &qlge->rx_ring[i];
1680 		if (rx_ring->type != TX_Q) {
1681 			ql_free_lbq_buffers(rx_ring);
1682 			ql_free_sbq_buffers(rx_ring);
1683 		}
1684 	}
1685 }
1686 
1687 /*
1688  * Allocate rx buffers
1689  */
1690 static int
1691 ql_alloc_rx_buffers(qlge_t *qlge)
1692 {
1693 	struct rx_ring *rx_ring;
1694 	int i;
1695 
1696 	for (i = 0; i < qlge->rx_ring_count; i++) {
1697 		rx_ring = &qlge->rx_ring[i];
1698 		if (rx_ring->type != TX_Q) {
1699 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1700 				goto alloc_err;
1701 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1702 				goto alloc_err;
1703 		}
1704 	}
1705 #ifdef QLGE_TRACK_BUFFER_USAGE
1706 	for (i = 0; i < qlge->rx_ring_count; i++) {
1707 		if (qlge->rx_ring[i].type == RX_Q) {
1708 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1709 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1710 		}
1711 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1712 	}
1713 #endif
1714 	return (DDI_SUCCESS);
1715 
1716 alloc_err:
1717 	ql_free_rx_buffers(qlge);
1718 	return (DDI_FAILURE);
1719 }
1720 
1721 /*
1722  * Initialize large buffer queue ring
1723  */
1724 static void
1725 ql_init_lbq_ring(struct rx_ring *rx_ring)
1726 {
1727 	uint16_t i;
1728 	struct bq_desc *lbq_desc;
1729 
1730 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1731 	for (i = 0; i < rx_ring->lbq_len; i++) {
1732 		lbq_desc = &rx_ring->lbq_desc[i];
1733 		lbq_desc->index = i;
1734 	}
1735 }
1736 
1737 /*
1738  * Initialize small buffer queue ring
1739  */
1740 static void
1741 ql_init_sbq_ring(struct rx_ring *rx_ring)
1742 {
1743 	uint16_t i;
1744 	struct bq_desc *sbq_desc;
1745 
1746 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1747 	for (i = 0; i < rx_ring->sbq_len; i++) {
1748 		sbq_desc = &rx_ring->sbq_desc[i];
1749 		sbq_desc->index = i;
1750 	}
1751 }
1752 
1753 /*
1754  * Calculate the pseudo-header checksum when the hardware cannot do it
1755  */
1756 static void
1757 ql_pseudo_cksum(uint8_t *buf)
1758 {
1759 	uint32_t cksum;
1760 	uint16_t iphl;
1761 	uint16_t proto;
1762 
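	/*
	 * Sum the pseudo-header fields straight from the IP header: the
	 * TCP/UDP length (total length minus the IP header length), the
	 * protocol and the source/destination addresses, then fold the
	 * carries and store the result in the TCP or UDP checksum field
	 * for the hardware to complete.
	 */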
1763 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1764 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1765 	cksum += proto = buf[9];
1766 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1767 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1768 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1769 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1770 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1771 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1772 
1773 	/*
1774 	 * Point it to the TCP/UDP header, and
1775 	 * update the checksum field.
1776 	 */
1777 	buf += iphl + ((proto == IPPROTO_TCP) ?
1778 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1779 
1780 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1781 
1782 }
1783 
1784 /*
1785  * Transmit an incoming packet.
1786  */
1787 mblk_t *
1788 ql_ring_tx(void *arg, mblk_t *mp)
1789 {
1790 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1791 	qlge_t *qlge = tx_ring->qlge;
1792 	mblk_t *next;
1793 	int rval;
1794 	uint32_t tx_count = 0;
1795 
1796 	if (qlge->port_link_state == LS_DOWN) {
1797 		/* cannot send while the link is down */
1798 		mblk_t *tp;
1799 
1800 		while (mp != NULL) {
1801 			tp = mp->b_next;
1802 			mp->b_next = NULL;
1803 			freemsg(mp);
1804 			mp = tp;
1805 		}
1806 		goto exit;
1807 	}
1808 
1809 	mutex_enter(&tx_ring->tx_lock);
1810 	/* if the mac is not started, the driver is not ready and cannot send */
1811 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
1812 		cmn_err(CE_WARN, "%s(%d) ring not started, mode %d, "
1813 		    "returning packets",
1814 		    __func__, qlge->instance, tx_ring->mac_flags);
1815 		mutex_exit(&tx_ring->tx_lock);
1816 		goto exit;
1817 	}
1818 
1819 	/* we must try to send all */
1820 	while (mp != NULL) {
1821 		/*
1822 		 * if the number of available slots is at or below the
1823 		 * threshold, then quit
1824 		 */
1825 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1826 			tx_ring->queue_stopped = 1;
1827 			rval = DDI_FAILURE;
1828 #ifdef QLGE_LOAD_UNLOAD
1829 			cmn_err(CE_WARN, "%s(%d) no resources",
1830 			    __func__, qlge->instance);
1831 #endif
1832 			tx_ring->defer++;
1833 			/*
1834 			 * If we return the buffer back we are expected to call
1835 			 * mac_tx_ring_update() when resources are available
1836 			 */
1837 			break;
1838 		}
1839 
1840 		next = mp->b_next;
1841 		mp->b_next = NULL;
1842 
1843 		rval = ql_send_common(tx_ring, mp);
1844 
1845 		if (rval != DDI_SUCCESS) {
1846 			mp->b_next = next;
1847 			break;
1848 		}
1849 		tx_count++;
1850 		mp = next;
1851 	}
1852 
1853 	/*
1854 	 * After all msg blocks are mapped or copied to tx buffer,
1855 	 * trigger the hardware to send!
1856 	 */
1857 	if (tx_count > 0) {
1858 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1859 		    tx_ring->prod_idx);
1860 	}
1861 
1862 	mutex_exit(&tx_ring->tx_lock);
1863 exit:
1864 	return (mp);
1865 }
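/*
 * Note on the flow-control contract above: returning a non-NULL chain
 * from ql_ring_tx() tells the framework the ring is out of resources.
 * Transmission is resumed by the completion path
 * (ql_clean_outbound_rx_ring() below), which clears queue_stopped once
 * tx_free_count rises above TX_RESUME_THRESHOLD and calls RESUME_TX(),
 * presumably notifying the MAC layer via mac_tx_ring_update().
 */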
1866 
1867 
1868 /*
1869  * This function builds an mblk list for the given inbound
1870  * completion.
1871  */
1872 
1873 static mblk_t *
1874 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1875     struct ib_mac_iocb_rsp *ib_mac_rsp)
1876 {
1877 	mblk_t *mp = NULL;
1878 	mblk_t *mp1 = NULL;	/* packet header */
1879 	mblk_t *mp2 = NULL;	/* packet content */
1880 	struct bq_desc *lbq_desc;
1881 	struct bq_desc *sbq_desc;
1882 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1883 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1884 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1885 	uint32_t pkt_len = payload_len + header_len;
1886 	uint32_t done;
1887 	uint64_t *curr_ial_ptr;
1888 	uint32_t ial_data_addr_low;
1889 	uint32_t actual_data_addr_low;
1890 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1891 	uint32_t size;
1892 	uint32_t cp_offset;
1893 	boolean_t rx_copy = B_FALSE;
1894 	mblk_t *tp = NULL;
1895 
1896 	/*
1897 	 * Check if error flags are set
1898 	 */
1899 	if (err_flag != 0) {
1900 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1901 			rx_ring->frame_too_long++;
1902 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1903 			rx_ring->frame_too_short++;
1904 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1905 			rx_ring->fcs_err++;
1906 #ifdef QLGE_LOAD_UNLOAD
1907 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1908 #endif
1909 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1910 		    (uint8_t *)ib_mac_rsp, 8,
1911 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1912 	}
1913 
1914 	/* header should not be in large buffer */
1915 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1916 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1917 		err_flag |= 1;
1918 	}
1919 	/* if the whole packet is bigger than the rx buffer size */
1920 	if (pkt_len > qlge->max_frame_size) {
1921 		cmn_err(CE_WARN, "ql_build_rx_mp: frame too long (%d)!", pkt_len);
1922 		err_flag |= 1;
1923 	}
1924 	if (qlge->rx_copy ||
1925 	    (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1926 	    (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1927 		rx_copy = B_TRUE;
1928 	}
1929 
1930 	/* if using rx copy mode, we need to allocate a big enough buffer */
1931 	if (rx_copy) {
1932 		qlge->stats.norcvbuf++;
1933 		tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1934 		    BPRI_MED);
1935 		if (tp == NULL) {
1936 			cmn_err(CE_WARN, "rx copy failed to allocate memory");
1937 		} else {
1938 			tp->b_rptr += qlge->ip_hdr_offset;
1939 		}
1940 	}
1941 	/*
1942 	 * Handle the header buffer if present.
1943 	 * The packet header must be valid and saved in one small buffer;
1944 	 * broadcast/multicast packet headers are not split.
1945 	 */
1946 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1947 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1948 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1949 		    header_len));
1950 		/* Sync access */
1951 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1952 
1953 		ASSERT(sbq_desc != NULL);
1954 
1955 		/*
1956 		 * Validate addresses from the ASIC with the
1957 		 * expected sbuf address
1958 		 */
1959 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1960 		    != ib_mac_rsp->hdr_addr) {
1961 			/* Small buffer address mismatch */
1962 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1963 			    " in wrong small buffer",
1964 			    __func__, qlge->instance, rx_ring->cq_id);
1965 			goto fatal_error;
1966 		}
1967 		/* get this packet */
1968 		mp1 = sbq_desc->mp;
1969 		/* Flush DMA'd data */
1970 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1971 		    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1972 
1973 		if ((err_flag != 0) || (mp1 == NULL)) {
1974 			/* failed on this packet, put it back for re-arming */
1975 #ifdef QLGE_LOAD_UNLOAD
1976 			cmn_err(CE_WARN, "get header from small buffer fail");
1977 #endif
1978 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1979 			mp1 = NULL;
1980 		} else if (rx_copy) {
1981 			if (tp != NULL) {
1982 				bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1983 				    header_len);
1984 			}
1985 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1986 			mp1 = NULL;
1987 		} else {
1988 			if ((qlge->ip_hdr_offset != 0) &&
1989 			    (header_len < SMALL_BUFFER_SIZE)) {
1990 				/*
1991 				 * copy the entire header to a 2-byte boundary
1992 				 * address for 8100 adapters so that the IP
1993 				 * header lands on a 4-byte boundary address
1994 				 */
1995 				bcopy(mp1->b_rptr,
1996 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1997 				    qlge->ip_hdr_offset),
1998 				    header_len);
1999 				mp1->b_rptr += SMALL_BUFFER_SIZE +
2000 				    qlge->ip_hdr_offset;
2001 			}
2002 
2003 			/*
2004 			 * Adjust the mblk length to match
2005 			 * the packet header length
2006 			 */
2007 			mp1->b_wptr = mp1->b_rptr + header_len;
2008 			mp1->b_next = mp1->b_cont = NULL;
2009 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2010 			    (uint8_t *)mp1->b_rptr, 8, header_len);
2011 		}
2012 	}
2013 
2014 	/*
2015 	 * The packet data (or the whole packet) can be in a small
2016 	 * buffer or in one or several large buffer(s)
2017 	 */
2018 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2019 		/*
2020 		 * The data is in a single small buffer.
2021 		 */
2022 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2023 
2024 		ASSERT(sbq_desc != NULL);
2025 
2026 		QL_PRINT(DBG_RX,
2027 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
2028 		    "sbq_desc->bd_dma.dma_addr = %x,"
2029 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2030 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2031 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2032 
2033 		/*
2034 		 * Validate  addresses from the ASIC with the
2035 		 * expected sbuf address
2036 		 */
2037 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2038 		    != ib_mac_rsp->data_addr) {
2039 			/* Small buffer address mismatch */
2040 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2041 			    " in wrong small buffer",
2042 			    __func__, qlge->instance, rx_ring->cq_id);
2043 			goto fatal_error;
2044 		}
2045 		/* get this packet */
2046 		mp2 = sbq_desc->mp;
2047 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2048 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2049 		if ((err_flag != 0) || (mp2 == NULL)) {
2050 #ifdef QLGE_LOAD_UNLOAD
2051 			/* failed on this packet, put it back for re-arming */
2052 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2053 #endif
2054 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2055 			mp2 = NULL;
2056 		} else if (rx_copy) {
2057 			if (tp != NULL) {
2058 				bcopy(sbq_desc->bd_dma.vaddr,
2059 				    tp->b_rptr + header_len, payload_len);
2060 				tp->b_wptr =
2061 				    tp->b_rptr + header_len + payload_len;
2062 			}
2063 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2064 			mp2 = NULL;
2065 		} else {
2066 			/* Adjust the buffer length to match the payload_len */
2067 			mp2->b_wptr = mp2->b_rptr + payload_len;
2068 			mp2->b_next = mp2->b_cont = NULL;
2069 			/* Flush DMA'd data */
2070 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2071 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2072 			/*
2073 			 * if the payload is small enough, copy it to
2074 			 * the end of the packet header
2075 			 */
2076 			if ((mp1 != NULL) &&
2077 			    (payload_len <= qlge->payload_copy_thresh) &&
2078 			    (pkt_len <
2079 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2080 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2081 				mp1->b_wptr += payload_len;
2082 				freemsg(mp2);
2083 				mp2 = NULL;
2084 			}
2085 		}
2086 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087 		/*
2088 		 * The data is in a single large buffer.
2089 		 */
2090 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2091 
2092 		QL_PRINT(DBG_RX,
2093 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
2094 		    "lbq_desc->bd_dma.dma_addr = %x,"
2095 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2096 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2097 		    ib_mac_rsp->data_addr, lbq_desc->mp));
2098 
2099 		ASSERT(lbq_desc != NULL);
2100 
2101 		/*
2102 		 * Validate  addresses from the ASIC with
2103 		 * the expected lbuf address
2104 		 */
2105 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2106 		    != ib_mac_rsp->data_addr) {
2107 			/* Large buffer address mismatch */
2108 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2109 			    " in wrong large buffer",
2110 			    __func__, qlge->instance, rx_ring->cq_id);
2111 			goto fatal_error;
2112 		}
2113 		mp2 = lbq_desc->mp;
2114 		/* Flush DMA'd data */
2115 		(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2116 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2117 		if ((err_flag != 0) || (mp2 == NULL)) {
2118 #ifdef QLGE_LOAD_UNLOAD
2119 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2120 #endif
2121 			/* failed on this packet, put it back for re-arming */
2122 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2123 			mp2 = NULL;
2124 		} else if (rx_copy) {
2125 			if (tp != NULL) {
2126 				bcopy(lbq_desc->bd_dma.vaddr,
2127 				    tp->b_rptr + header_len, payload_len);
2128 				tp->b_wptr =
2129 				    tp->b_rptr + header_len + payload_len;
2130 			}
2131 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2132 			mp2 = NULL;
2133 		} else {
2134 			/*
2135 			 * Adjust the buffer length to match
2136 			 * the packet payload_len
2137 			 */
2138 			mp2->b_wptr = mp2->b_rptr + payload_len;
2139 			mp2->b_next = mp2->b_cont = NULL;
2140 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2141 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2142 			/*
2143 			 * if the payload is small enough, copy it to
2144 			 * the end of the packet header
2145 			 */
2146 			if ((mp1 != NULL) &&
2147 			    (payload_len <= qlge->payload_copy_thresh) &&
2148 			    (pkt_len <
2149 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2150 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2151 				mp1->b_wptr += payload_len;
2152 				freemsg(mp2);
2153 				mp2 = NULL;
2154 			}
2155 		}
2156 	} else if (payload_len) { /* ial case */
2157 		/*
2158 		 * payload is available but in neither a small nor a large
2159 		 * buffer, so it is saved in an IAL (indirect address list)
2160 		 */
2161 #ifdef QLGE_LOAD_UNLOAD
2162 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2163 #endif
2164 		/* lrg buf addresses are saved in one small buffer */
2165 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2166 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2167 		done = 0;
2168 		cp_offset = 0;
2169 
2170 		while (!done) {
2171 			ial_data_addr_low =
2172 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2173 			    0xFFFFFFFE);
2174 			/* check if this is the last packet fragment */
2175 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2176 			curr_ial_ptr++;
2177 			/*
2178 			 * The data is in one or several large buffer(s).
2179 			 */
2180 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2181 			actual_data_addr_low =
2182 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2183 			    0xFFFFFFFE);
2184 			if (ial_data_addr_low != actual_data_addr_low) {
2185 				cmn_err(CE_WARN,
2186 				    "packet saved in wrong ial lrg buffer"
2187 				    " expected %x, actual %lx",
2188 				    ial_data_addr_low,
2189 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2190 				goto fatal_error;
2191 			}
2192 
2193 			size = (payload_len < rx_ring->lbq_buf_size) ?
2194 			    payload_len : rx_ring->lbq_buf_size;
2195 			payload_len -= size;
2196 			mp2 = lbq_desc->mp;
2197 			if ((err_flag != 0) || (mp2 == NULL)) {
2198 #ifdef QLGE_LOAD_UNLOAD
2199 				cmn_err(CE_WARN,
2200 				    "ignore bad data from large buffer");
2201 #endif
2202 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2203 				mp2 = NULL;
2204 			} else if (rx_copy) {
2205 				if (tp != NULL) {
2206 					(void) ddi_dma_sync(
2207 					    lbq_desc->bd_dma.dma_handle,
2208 					    0, size, DDI_DMA_SYNC_FORKERNEL);
2209 					bcopy(lbq_desc->bd_dma.vaddr,
2210 					    tp->b_rptr + header_len + cp_offset,
2211 					    size);
2212 					tp->b_wptr =
2213 					    tp->b_rptr + size + cp_offset +
2214 					    header_len;
2215 					cp_offset += size;
2216 				}
2217 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2218 				mp2 = NULL;
2219 			} else {
2220 				if (mp_ial == NULL) {
2221 					mp_ial = mp2;
2222 				} else {
2223 					linkb(mp_ial, mp2);
2224 				}
2225 
2226 				mp2->b_next = NULL;
2227 				mp2->b_cont = NULL;
2228 				mp2->b_wptr = mp2->b_rptr + size;
2229 				/* Flush DMA'd data */
2230 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2231 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2232 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2233 				    size));
2234 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2235 				    (uint8_t *)mp2->b_rptr, 8, size);
2236 			}
2237 		}
2238 		if (err_flag != 0) {
2239 #ifdef QLGE_LOAD_UNLOAD
2240 			/* failed on this packet, put it back for re-arming */
2241 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2242 #endif
2243 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2244 		} else {
2245 			mp2 = mp_ial;
2246 			freemsg(sbq_desc->mp);
2247 		}
2248 	}
2249 	/*
2250 	 * If the packet header was not split, send mp2 upstream by itself;
2251 	 * otherwise append data block mp2 to the tail of header block mp1
2252 	 */
2253 	if (!err_flag) {
2254 		if (rx_copy) {
2255 			if (tp != NULL) {
2256 				tp->b_next = NULL;
2257 				tp->b_cont = NULL;
2258 				tp->b_wptr = tp->b_rptr +
2259 				    header_len + payload_len;
2260 			}
2261 			mp = tp;
2262 		} else {
2263 			if (mp1) {
2264 				if (mp2) {
2265 					QL_PRINT(DBG_RX,
2266 					    ("packet in mp1 and mp2\n"));
2267 					/* mp1->b_cont = mp2; */
2268 					linkb(mp1, mp2);
2269 					mp = mp1;
2270 				} else {
2271 					QL_PRINT(DBG_RX,
2272 					    ("packet in mp1 only\n"));
2273 					mp = mp1;
2274 				}
2275 			} else if (mp2) {
2276 				QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2277 				mp = mp2;
2278 			}
2279 		}
2280 	}
2281 	return (mp);
2282 
2283 fatal_error:
2284 	/* fatal Error! */
2285 	if (qlge->fm_enable) {
2286 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2287 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2288 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2289 	}
2290 	if (tp) {
2291 		freemsg(tp);
2292 	}
2293 
2294 	/* *mp->b_wptr = 0; */
2295 	ql_wake_asic_reset_soft_intr(qlge);
2296 	return (NULL);
2297 
2298 }
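/*
 * Note on the rx_copy path above: when qlge->rx_copy is set, or when
 * sbuf_in_use_count or lbuf_in_use_count has dropped to
 * qlge->rx_copy_threshold or below, the frame is copied into a freshly
 * allocated mblk (tp) and the small/large DMA buffers are put straight
 * back on their free lists via ql_refill_sbuf_free_list() /
 * ql_refill_lbuf_free_list() instead of being loaned upstream.  This
 * trades an extra bcopy per packet for returning DMA buffers to the
 * ring immediately rather than waiting for the stack to free them.
 */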
2299 
2300 /*
2301  * Bump completion queue consumer index.
2302  */
2303 static void
2304 ql_update_cq(struct rx_ring *rx_ring)
2305 {
2306 	rx_ring->cnsmr_idx++;
2307 	rx_ring->curr_entry++;
2308 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2309 		rx_ring->cnsmr_idx = 0;
2310 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2311 	}
2312 }
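/*
 * For example (the size is illustrative): with cq_len == 256, consuming
 * entry 255 wraps cnsmr_idx back to 0 and points curr_entry at
 * cq_dma.vaddr again, so the completion queue is traversed as a ring.
 */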
2313 
2314 /*
2315  * Update completion queue consumer index.
2316  */
2317 static void
2318 ql_write_cq_idx(struct rx_ring *rx_ring)
2319 {
2320 	qlge_t *qlge = rx_ring->qlge;
2321 
2322 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2323 	    rx_ring->cnsmr_idx);
2324 }
2325 
2326 /*
2327  * Processes a SYS-Chip Event Notification Completion Event.
2328  * The incoming notification event describes a link up/down transition
2329  * or that some sort of error has happened.
2330  */
2331 static void
2332 ql_process_chip_ae_intr(qlge_t *qlge,
2333     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2334 {
2335 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2336 	uint32_t soft_req = 0;
2337 
2338 	switch (eventType) {
2339 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2340 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2341 			break;
2342 
2343 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2344 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2345 			break;
2346 
2347 		case SYS_EVENT_MULTIPLE_CAM_HITS : /* 0x6h */
2348 			cmn_err(CE_WARN, "A multiple CAM hits lookup error "
2349 			    "occurred");
2350 			soft_req |= NEED_HW_RESET;
2351 			break;
2352 
2353 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2354 			cmn_err(CE_WARN, "Soft ECC error detected");
2355 			soft_req |= NEED_HW_RESET;
2356 			break;
2357 
2358 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2359 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2360 			    " error occurred");
2361 			soft_req |= NEED_MPI_RESET;
2362 			break;
2363 
2364 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2365 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2366 			break;
2367 
2368 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2369 			cmn_err(CE_WARN, "PCI Error reading small/large "
2370 			    "buffers occurred");
2371 			soft_req |= NEED_HW_RESET;
2372 			break;
2373 
2374 		default:
2375 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2376 			    "type 0x%x occurred",
2377 			    __func__, qlge->instance, eventType));
2378 			break;
2379 	}
2380 
2381 	if ((soft_req & NEED_MPI_RESET) != 0) {
2382 		ql_wake_mpi_reset_soft_intr(qlge);
2383 		if (qlge->fm_enable) {
2384 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2385 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2386 		}
2387 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2388 		ql_wake_asic_reset_soft_intr(qlge);
2389 		if (qlge->fm_enable) {
2390 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2391 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2392 		}
2393 	}
2394 }
2395 
2396 /*
2397  * set received packet checksum flag
2398  */
2399 void
2400 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2401 {
2402 	uint32_t flags;
2403 
2404 	/* Not TCP or UDP packet? nothing more to do */
2405 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2406 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2407 		return;
2408 
2409 	/* No CKO support for IPv6 */
2410 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2411 		return;
2412 
2413 	/*
2414 	 * If checksum error, don't set flags; stack will calculate
2415 	 * checksum, detect the error and update statistics
2416 	 */
2417 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2418 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2419 		return;
2420 
2421 	/* TCP or UDP packet and checksum valid */
2422 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2423 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2424 		flags = HCK_FULLCKSUM_OK;
2425 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2426 	}
2427 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2428 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2429 		flags = HCK_FULLCKSUM_OK;
2430 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2431 	}
2432 }
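/*
 * Minimal sketch (not compiled in) of how an upper-layer consumer could
 * read the flag set above; the mac_hcksum_get() call and the local
 * variables here are assumptions for illustration only.
 */
#if 0
	uint32_t start, stuff, end, value, flags;

	mac_hcksum_get(mp, &start, &stuff, &end, &value, &flags);
	if (flags & HCK_FULLCKSUM_OK) {
		/* hardware has already verified the TCP/UDP checksum */
	}
#endif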
2433 
2434 /*
2435  * This function walks the h/w descriptors of one specified rx ring,
2436  * receiving the data when the descriptor status shows it is ready.
2437  * It returns a chain of mblks containing the received data, to be
2438  * passed up to mac_rx_ring().
2439  */
2440 mblk_t *
2441 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2442 {
2443 	qlge_t *qlge = rx_ring->qlge;
2444 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2445 	struct ib_mac_iocb_rsp *net_rsp;
2446 	mblk_t *mp;
2447 	mblk_t *mblk_head;
2448 	mblk_t **mblk_tail;
2449 	uint32_t received_bytes = 0;
2450 	uint32_t length;
2451 #ifdef QLGE_PERFORMANCE
2452 	uint32_t pkt_ct = 0;
2453 #endif
2454 
2455 #ifdef QLGE_TRACK_BUFFER_USAGE
2456 	uint32_t consumer_idx;
2457 	uint32_t producer_idx;
2458 	uint32_t num_free_entries;
2459 	uint32_t temp;
2460 
2461 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2462 	consumer_idx = temp & 0x0000ffff;
2463 	producer_idx = (temp >> 16);
2464 
2465 	if (consumer_idx > producer_idx)
2466 		num_free_entries = (consumer_idx - producer_idx);
2467 	else
2468 		num_free_entries = NUM_RX_RING_ENTRIES - (
2469 		    producer_idx - consumer_idx);
2470 
2471 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2472 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2473 
2474 #endif
2475 	mblk_head = NULL;
2476 	mblk_tail = &mblk_head;
2477 
2478 	while ((prod != rx_ring->cnsmr_idx)) {
2479 		QL_PRINT(DBG_RX,
2480 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2481 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2482 
2483 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2484 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2485 		    (off_t)((uintptr_t)net_rsp -
2486 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2487 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2488 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2489 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2490 
2491 		switch (net_rsp->opcode) {
2492 
2493 		case OPCODE_IB_MAC_IOCB:
2494 			/* Adding length of pkt header and payload */
2495 			length = le32_to_cpu(net_rsp->data_len) +
2496 			    le32_to_cpu(net_rsp->hdr_len);
2497 			if ((poll_bytes != QLGE_POLL_ALL) &&
2498 			    ((received_bytes + length) > poll_bytes)) {
2499 				continue;
2500 			}
2501 			received_bytes += length;
2502 
2503 #ifdef QLGE_PERFORMANCE
2504 			pkt_ct++;
2505 #endif
2506 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2507 			if (mp != NULL) {
2508 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2509 					/*
2510 					 * Increment number of packets we have
2511 					 * indicated to the stack, should be
2512 					 * decremented when we get it back
2513 					 * or when freemsg is called
2514 					 */
2515 					ASSERT(rx_ring->rx_indicate
2516 					    <= rx_ring->cq_len);
2517 #ifdef QLGE_LOAD_UNLOAD
2518 					cmn_err(CE_WARN, "%s not sending to OS,"
2519 					    " mac_flags %d, indicate %d",
2520 					    __func__, rx_ring->mac_flags,
2521 					    rx_ring->rx_indicate);
2522 #endif
2523 					QL_PRINT(DBG_RX,
2524 					    ("cq_id = %d, packet "
2525 					    "dropped, mac not "
2526 					    "enabled.\n",
2527 					    rx_ring->cq_id));
2528 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2529 
2530 					/* rx_lock is expected to be held */
2531 					mutex_exit(&rx_ring->rx_lock);
2532 					freemsg(mp);
2533 					mutex_enter(&rx_ring->rx_lock);
2534 					mp = NULL;
2535 				}
2536 
2537 				if (mp != NULL) {
2538 					/*
2539 					 * IP full packet has been
2540 					 * successfully verified by
2541 					 * H/W and is correct
2542 					 */
2543 					ql_set_rx_cksum(mp, net_rsp);
2544 
2545 					rx_ring->rx_packets++;
2546 					rx_ring->rx_bytes += length;
2547 					*mblk_tail = mp;
2548 					mblk_tail = &mp->b_next;
2549 				}
2550 			} else {
2551 				QL_PRINT(DBG_RX,
2552 				    ("cq_id = %d, packet dropped\n",
2553 				    rx_ring->cq_id));
2554 				rx_ring->rx_packets_dropped_no_buffer++;
2555 			}
2556 			break;
2557 
2558 		case OPCODE_IB_SYS_EVENT_IOCB:
2559 			ql_process_chip_ae_intr(qlge,
2560 			    (struct ib_sys_event_iocb_rsp *)
2561 			    net_rsp);
2562 			break;
2563 
2564 		default:
2565 			cmn_err(CE_WARN,
2566 			    "%s Ring(%d) hit default case, not handled!"
2567 			    " dropping the packet, "
2568 			    "opcode = %x.", __func__, rx_ring->cq_id,
2569 			    net_rsp->opcode);
2570 			break;
2571 		}
2572 		/* increment cnsmr_idx and curr_entry */
2573 		ql_update_cq(rx_ring);
2574 		prod = ql_read_sh_reg(qlge, rx_ring);
2575 
2576 	}
2577 
2578 #ifdef QLGE_PERFORMANCE
2579 	if (pkt_ct >= 7)
2580 		rx_ring->hist[7]++;
2581 	else if (pkt_ct == 6)
2582 		rx_ring->hist[6]++;
2583 	else if (pkt_ct == 5)
2584 		rx_ring->hist[5]++;
2585 	else if (pkt_ct == 4)
2586 		rx_ring->hist[4]++;
2587 	else if (pkt_ct == 3)
2588 		rx_ring->hist[3]++;
2589 	else if (pkt_ct == 2)
2590 		rx_ring->hist[2]++;
2591 	else if (pkt_ct == 1)
2592 		rx_ring->hist[1]++;
2593 	else if (pkt_ct == 0)
2594 		rx_ring->hist[0]++;
2595 #endif
2596 
2597 	/* update cnsmr_idx */
2598 	ql_write_cq_idx(rx_ring);
2599 	/* do not enable interrupt for polling mode */
2600 	if (poll_bytes == QLGE_POLL_ALL)
2601 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2602 	return (mblk_head);
2603 }
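/*
 * The returned chain is consumed differently depending on the mode: in
 * interrupt mode the callers (ql_isr()/ql_msix_rx_isr()) pass
 * QLGE_POLL_ALL and push the chain upstream themselves via RX_UPSTREAM,
 * while in poll mode ql_ring_rx_poll() returns the byte-budget-limited
 * chain to the MAC layer.
 */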
2604 
2605 /* Process an outbound completion from an rx ring. */
2606 static void
2607 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2608 {
2609 	struct tx_ring *tx_ring;
2610 	struct tx_ring_desc *tx_ring_desc;
2611 	int j;
2612 
2613 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2614 	tx_ring_desc = tx_ring->wq_desc;
2615 	tx_ring_desc += mac_rsp->tid;
2616 
2617 	if (tx_ring_desc->tx_type == USE_DMA) {
2618 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2619 		    __func__, qlge->instance));
2620 
2621 		/*
2622 		 * Release the DMA resource that is used for
2623 		 * DMA binding.
2624 		 */
2625 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2626 			(void) ddi_dma_unbind_handle(
2627 			    tx_ring_desc->tx_dma_handle[j]);
2628 		}
2629 
2630 		tx_ring_desc->tx_dma_handle_used = 0;
2631 		/*
2632 		 * Free the mblk after sending completed
2633 		 */
2634 		if (tx_ring_desc->mp != NULL) {
2635 			freemsg(tx_ring_desc->mp);
2636 			tx_ring_desc->mp = NULL;
2637 		}
2638 	}
2639 
2640 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2641 	tx_ring->opackets++;
2642 
2643 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2644 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2645 		tx_ring->errxmt++;
2646 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2647 			/* EMPTY */
2648 			QL_PRINT(DBG_TX,
2649 			    ("Total descriptor length did not match "
2650 			    "transfer length.\n"));
2651 		}
2652 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2653 			/* EMPTY */
2654 			QL_PRINT(DBG_TX,
2655 			    ("Frame too short to be legal, not sent.\n"));
2656 		}
2657 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2658 			/* EMPTY */
2659 			QL_PRINT(DBG_TX,
2660 			    ("Frame too long, but sent anyway.\n"));
2661 		}
2662 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2663 			/* EMPTY */
2664 			QL_PRINT(DBG_TX,
2665 			    ("PCI backplane error. Frame not sent.\n"));
2666 		}
2667 	}
2668 	atomic_inc_32(&tx_ring->tx_free_count);
2669 }
2670 
2671 /*
2672  * clean up tx completion iocbs
2673  */
2674 int
2675 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2676 {
2677 	qlge_t *qlge = rx_ring->qlge;
2678 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2679 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2680 	int count = 0;
2681 	struct tx_ring *tx_ring;
2682 	boolean_t resume_tx = B_FALSE;
2683 
2684 	mutex_enter(&rx_ring->rx_lock);
2685 #ifdef QLGE_TRACK_BUFFER_USAGE
2686 	{
2687 	uint32_t consumer_idx;
2688 	uint32_t producer_idx;
2689 	uint32_t num_free_entries;
2690 	uint32_t temp;
2691 
2692 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2693 	consumer_idx = temp & 0x0000ffff;
2694 	producer_idx = (temp >> 16);
2695 
2696 	if (consumer_idx > producer_idx)
2697 		num_free_entries = (consumer_idx - producer_idx);
2698 	else
2699 		num_free_entries = NUM_RX_RING_ENTRIES -
2700 		    (producer_idx - consumer_idx);
2701 
2702 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2703 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2704 
2705 	}
2706 #endif
2707 	/* While there are entries in the completion queue. */
2708 	while (prod != rx_ring->cnsmr_idx) {
2709 
2710 		QL_PRINT(DBG_RX,
2711 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2712 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2713 
2714 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2715 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2716 		    (off_t)((uintptr_t)net_rsp -
2717 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2718 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2719 
2720 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2721 		    "response packet data\n",
2722 		    rx_ring->curr_entry, 8,
2723 		    (size_t)sizeof (*net_rsp));
2724 
2725 		switch (net_rsp->opcode) {
2726 
2727 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2728 		case OPCODE_OB_MAC_IOCB:
2729 			ql_process_mac_tx_intr(qlge, net_rsp);
2730 			break;
2731 
2732 		default:
2733 			cmn_err(CE_WARN,
2734 			    "%s Hit default case, not handled! "
2735 			    "dropping the packet,"
2736 			    " opcode = %x.",
2737 			    __func__, net_rsp->opcode);
2738 			break;
2739 		}
2740 		count++;
2741 		ql_update_cq(rx_ring);
2742 		prod = ql_read_sh_reg(qlge, rx_ring);
2743 	}
2744 	ql_write_cq_idx(rx_ring);
2745 
2746 	mutex_exit(&rx_ring->rx_lock);
2747 
2748 	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2749 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2750 
2751 	mutex_enter(&tx_ring->tx_lock);
2752 
2753 	if (tx_ring->queue_stopped &&
2754 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2755 		/*
2756 		 * The queue got stopped because the tx_ring was full.
2757 		 * Wake it up, because it's now at least 25% empty.
2758 		 */
2759 		tx_ring->queue_stopped = 0;
2760 		resume_tx = B_TRUE;
2761 	}
2762 
2763 	mutex_exit(&tx_ring->tx_lock);
2764 	/* Don't hold the lock during OS callback */
2765 	if (resume_tx)
2766 		RESUME_TX(tx_ring);
2767 	return (count);
2768 }
2769 
2770 /*
2771  * reset asic when error happens
2772  */
2773 /* ARGSUSED */
2774 static uint_t
2775 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2776 {
2777 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2778 	int status;
2779 
2780 	mutex_enter(&qlge->gen_mutex);
2781 	(void) ql_do_stop(qlge);
2782 	/*
2783 	 * Write default ethernet address to chip register Mac
2784 	 * Address slot 0 and Enable Primary Mac Function.
2785 	 */
2786 	mutex_enter(&qlge->hw_mutex);
2787 	(void) ql_unicst_set(qlge,
2788 	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2789 	mutex_exit(&qlge->hw_mutex);
2790 	qlge->mac_flags = QL_MAC_INIT;
2791 	status = ql_do_start(qlge);
2792 	if (status != DDI_SUCCESS)
2793 		goto error;
2794 	qlge->mac_flags = QL_MAC_STARTED;
2795 	mutex_exit(&qlge->gen_mutex);
2796 	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2797 
2798 	return (DDI_INTR_CLAIMED);
2799 
2800 error:
2801 	mutex_exit(&qlge->gen_mutex);
2802 	cmn_err(CE_WARN,
2803 	    "qlge up/down cycle failed, closing device");
2804 	if (qlge->fm_enable) {
2805 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2806 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2807 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2808 	}
2809 	return (DDI_INTR_CLAIMED);
2810 }
2811 
2812 /*
2813  * Reset MPI
2814  */
2815 /* ARGSUSED */
2816 static uint_t
2817 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2818 {
2819 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2820 
2821 	(void) ql_reset_mpi_risc(qlge);
2822 	return (DDI_INTR_CLAIMED);
2823 }
2824 
2825 /*
2826  * Process MPI mailbox messages
2827  */
2828 /* ARGSUSED */
2829 static uint_t
2830 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2831 {
2832 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2833 
2834 	ql_do_mpi_intr(qlge);
2835 	return (DDI_INTR_CLAIMED);
2836 }
2837 
2838 /* Fire up a handler to reset the ASIC. */
2839 void
2840 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2841 {
2842 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2843 }
2844 
2845 static void
2846 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2847 {
2848 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2849 }
2850 
2851 static void
2852 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2853 {
2854 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2855 }
2856 
2857 /*
2858  * This handles a fatal error, MPI activity, and the default
2859  * rx_ring in an MSI-X multiple interrupt vector environment.
2860  * In an MSI/Legacy environment it also processes the rest of
2861  * the rx_rings.
2862  */
2863 /* ARGSUSED */
2864 static uint_t
2865 ql_isr(caddr_t arg1, caddr_t arg2)
2866 {
2867 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2868 	struct rx_ring *ob_ring;
2869 	qlge_t *qlge = rx_ring->qlge;
2870 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2871 	uint32_t var, prod;
2872 	int i;
2873 	int work_done = 0;
2874 
2875 	mblk_t *mp;
2876 
2877 	_NOTE(ARGUNUSED(arg2));
2878 
2879 	++qlge->rx_interrupts[rx_ring->cq_id];
2880 
2881 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2882 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2883 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2884 		var = ql_read_reg(qlge, REG_STATUS);
2885 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2886 		return (DDI_INTR_CLAIMED);
2887 	}
2888 
2889 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2890 
2891 	/*
2892 	 * process send completes on first stride tx ring if available
2893 	 */
2894 	if (qlge->isr_stride) {
2895 		ob_ring = &qlge->rx_ring[qlge->isr_stride];
2896 		if (ql_read_sh_reg(qlge, ob_ring) !=
2897 		    ob_ring->cnsmr_idx) {
2898 			(void) ql_clean_outbound_rx_ring(ob_ring);
2899 		}
2900 	}
2901 	/*
2902 	 * Check the default queue and wake handler if active.
2903 	 */
2904 	rx_ring = &qlge->rx_ring[0];
2905 	prod = ql_read_sh_reg(qlge, rx_ring);
2906 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2907 	    prod, rx_ring->cnsmr_idx));
2908 	/* check if interrupt is due to incoming packet */
2909 	if (prod != rx_ring->cnsmr_idx) {
2910 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2911 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2912 		mutex_enter(&rx_ring->rx_lock);
2913 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2914 		mutex_exit(&rx_ring->rx_lock);
2915 
2916 		if (mp != NULL)
2917 			RX_UPSTREAM(rx_ring, mp);
2918 		work_done++;
2919 	} else {
2920 		/*
2921 		 * If the interrupt is not due to an incoming packet, read the
2922 		 * status register to check for errors or a mailbox interrupt.
2923 		 */
2924 		var = ql_read_reg(qlge, REG_STATUS);
2925 		if ((var & STATUS_FE) != 0) {
2926 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2927 			if (qlge->fm_enable) {
2928 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2929 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2930 				ddi_fm_service_impact(qlge->dip,
2931 				    DDI_SERVICE_LOST);
2932 			}
2933 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2934 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2935 			cmn_err(CE_WARN,
2936 			    "Resetting chip. Error Status Register = 0x%x",
2937 			    var);
2938 			ql_wake_asic_reset_soft_intr(qlge);
2939 			return (DDI_INTR_CLAIMED);
2940 		}
2941 
2942 		/*
2943 		 * Check MPI processor activity.
2944 		 */
2945 		if ((var & STATUS_PI) != 0) {
2946 			/*
2947 			 * We've got an async event or mailbox completion.
2948 			 * Handle it and clear the source of the interrupt.
2949 			 */
2950 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2951 
2952 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2953 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2954 			ql_wake_mpi_event_soft_intr(qlge);
2955 			work_done++;
2956 		}
2957 	}
2958 
2959 
2960 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2961 		/*
2962 		 * Start the DPC for each active queue.
2963 		 */
2964 		for (i = 1; i < qlge->rx_ring_count; i++) {
2965 			rx_ring = &qlge->rx_ring[i];
2966 
2967 			if (ql_read_sh_reg(qlge, rx_ring) !=
2968 			    rx_ring->cnsmr_idx) {
2969 				QL_PRINT(DBG_INTR,
2970 				    ("Waking handler for rx_ring[%d].\n", i));
2971 
2972 				ql_disable_completion_interrupt(qlge,
2973 				    rx_ring->irq);
2974 				if (rx_ring->type == TX_Q) {
2975 					(void) ql_clean_outbound_rx_ring(
2976 					    rx_ring);
2977 					ql_enable_completion_interrupt(
2978 					    rx_ring->qlge, rx_ring->irq);
2979 				} else {
2980 					mutex_enter(&rx_ring->rx_lock);
2981 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2982 					mutex_exit(&rx_ring->rx_lock);
2983 					if (mp != NULL)
2984 						RX_UPSTREAM(rx_ring, mp);
2985 #ifdef QLGE_LOAD_UNLOAD
2986 					if (rx_ring->mac_flags ==
2987 					    QL_MAC_STOPPED)
2988 						cmn_err(CE_NOTE,
2989 						    "%s rx_indicate(%d) %d\n",
2990 						    __func__, i,
2991 						    rx_ring->rx_indicate);
2992 #endif
2993 				}
2994 				work_done++;
2995 			}
2996 		}
2997 	}
2998 
2999 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
3000 
3001 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
3002 }
3003 
3004 /*
3005  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3006  */
3007 /* ARGSUSED */
3008 static uint_t
3009 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3010 {
3011 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3012 	qlge_t *qlge = rx_ring->qlge;
3013 	_NOTE(ARGUNUSED(arg2));
3014 
3015 	++qlge->rx_interrupts[rx_ring->cq_id];
3016 	(void) ql_clean_outbound_rx_ring(rx_ring);
3017 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3018 
3019 	return (DDI_INTR_CLAIMED);
3020 }
3021 
3022 /*
3023  * MSI-X Multiple Vector Interrupt Handler
3024  */
3025 /* ARGSUSED */
3026 static uint_t
3027 ql_msix_isr(caddr_t arg1, caddr_t arg2)
3028 {
3029 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3030 	struct rx_ring *ob_ring;
3031 	qlge_t *qlge = rx_ring->qlge;
3032 	mblk_t *mp;
3033 	_NOTE(ARGUNUSED(arg2));
3034 
3035 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3036 
3037 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
3038 
3039 	/*
3040 	 * process send completes on stride tx ring if available
3041 	 */
3042 	if (qlge->isr_stride) {
3043 		ob_ring = rx_ring + qlge->isr_stride;
3044 		if (ql_read_sh_reg(qlge, ob_ring) !=
3045 		    ob_ring->cnsmr_idx) {
3046 			++qlge->rx_interrupts[ob_ring->cq_id];
3047 			(void) ql_clean_outbound_rx_ring(ob_ring);
3048 		}
3049 	}
3050 
3051 	++qlge->rx_interrupts[rx_ring->cq_id];
3052 
3053 	mutex_enter(&rx_ring->rx_lock);
3054 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3055 	mutex_exit(&rx_ring->rx_lock);
3056 
3057 	if (mp != NULL)
3058 		RX_UPSTREAM(rx_ring, mp);
3059 
3060 	return (DDI_INTR_CLAIMED);
3061 }
3062 
3063 /*
3064  * Poll n_bytes of chained incoming packets
3065  */
3066 mblk_t *
3067 ql_ring_rx_poll(void *arg, int n_bytes)
3068 {
3069 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
3070 	qlge_t *qlge = rx_ring->qlge;
3071 	mblk_t *mp = NULL;
3072 	uint32_t var;
3073 
3074 	ASSERT(n_bytes >= 0);
3075 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3076 	    __func__, rx_ring->cq_id, n_bytes));
3077 
3078 	++qlge->rx_polls[rx_ring->cq_id];
3079 
3080 	if (n_bytes == 0)
3081 		return (mp);
3082 	mutex_enter(&rx_ring->rx_lock);
3083 	mp = ql_ring_rx(rx_ring, n_bytes);
3084 	mutex_exit(&rx_ring->rx_lock);
3085 
3086 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3087 		var = ql_read_reg(qlge, REG_STATUS);
3088 		/*
3089 		 * Check for fatal error.
3090 		 */
3091 		if ((var & STATUS_FE) != 0) {
3092 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3093 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
3094 			cmn_err(CE_WARN, "Got fatal error %x.", var);
3095 			ql_wake_asic_reset_soft_intr(qlge);
3096 			if (qlge->fm_enable) {
3097 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3098 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3099 				ddi_fm_service_impact(qlge->dip,
3100 				    DDI_SERVICE_LOST);
3101 			}
3102 		}
3103 		/*
3104 		 * Check MPI processor activity.
3105 		 */
3106 		if ((var & STATUS_PI) != 0) {
3107 			/*
3108 			 * We've got an async event or mailbox completion.
3109 			 * Handle it and clear the source of the interrupt.
3110 			 */
3111 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3112 			ql_do_mpi_intr(qlge);
3113 		}
3114 	}
3115 
3116 	return (mp);
3117 }
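/*
 * Illustrative (not compiled in) invocation of the poll entry point; in
 * normal operation the MAC layer calls it through the ring's registered
 * poll handler with its own byte budget.  The 64KB budget below is an
 * arbitrary example value.
 */
#if 0
	mblk_t *chain = ql_ring_rx_poll((void *)rx_ring, 64 * 1024);
#endif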
3118 
3119 /*
3120  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3121  */
3122 /* ARGSUSED */
3123 static uint_t
3124 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3125 {
3126 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3127 	qlge_t *qlge = rx_ring->qlge;
3128 	mblk_t *mp;
3129 	_NOTE(ARGUNUSED(arg2));
3130 
3131 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3132 
3133 	++qlge->rx_interrupts[rx_ring->cq_id];
3134 
3135 	mutex_enter(&rx_ring->rx_lock);
3136 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3137 	mutex_exit(&rx_ring->rx_lock);
3138 
3139 	if (mp != NULL)
3140 		RX_UPSTREAM(rx_ring, mp);
3141 
3142 	return (DDI_INTR_CLAIMED);
3143 }
3144 
3145 
3146 /*
3147  *
3148  * Allocate DMA Buffer for ioctl service
3149  *
3150  */
3151 static int
3152 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3153 {
3154 	uint64_t phy_addr;
3155 	uint64_t alloc_size;
3156 	ddi_dma_cookie_t dma_cookie;
3157 
3158 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3159 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3160 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3161 	    &ql_buf_acc_attr,
3162 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3163 	    &qlge->ioctl_buf_dma_attr.acc_handle,
3164 	    (size_t)alloc_size,  /* mem size */
3165 	    (size_t)0,  /* alignment */
3166 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3167 	    &dma_cookie) != 0) {
3168 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3169 		    __func__, qlge->instance);
3170 		return (DDI_FAILURE);
3171 	}
3172 
3173 	phy_addr = dma_cookie.dmac_laddress;
3174 
3175 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3176 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3177 		return (DDI_FAILURE);
3178 	}
3179 
3180 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3181 
3182 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3183 	    "phy_addr = 0x%lx\n",
3184 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3185 
3186 	return (DDI_SUCCESS);
3187 }
3188 
3189 
3190 /*
3191  * Function to free physical memory.
3192  */
3193 static void
3194 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3195 {
3196 	if (*dma_handle != NULL) {
3197 		(void) ddi_dma_unbind_handle(*dma_handle);
3198 		if (*acc_handle != NULL)
3199 			ddi_dma_mem_free(acc_handle);
3200 		ddi_dma_free_handle(dma_handle);
3201 		*acc_handle = NULL;
3202 		*dma_handle = NULL;
3203 	}
3204 }
3205 
3206 /*
3207  * Function to free ioctl dma buffer.
3208  */
3209 static void
3210 ql_free_ioctl_dma_buf(qlge_t *qlge)
3211 {
3212 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3213 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3214 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3215 
3216 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3217 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3218 	}
3219 }
3220 
3221 /*
3222  * Free shadow register space used for request and completion queues
3223  */
3224 static void
3225 ql_free_shadow_space(qlge_t *qlge)
3226 {
3227 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3228 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3229 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
3230 		bzero(&qlge->host_copy_shadow_dma_attr,
3231 		    sizeof (qlge->host_copy_shadow_dma_attr));
3232 	}
3233 
3234 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3235 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3236 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3237 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3238 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3239 	}
3240 }
3241 
3242 /*
3243  * Allocate shadow register space for request and completion queues
3244  */
3245 static int
3246 ql_alloc_shadow_space(qlge_t *qlge)
3247 {
3248 	ddi_dma_cookie_t dma_cookie;
3249 
3250 	if (ql_alloc_phys(qlge->dip,
3251 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3252 	    &ql_dev_acc_attr,
3253 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3254 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3255 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3256 	    (size_t)4, /* 4 bytes alignment */
3257 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3258 	    &dma_cookie) != 0) {
3259 		bzero(&qlge->host_copy_shadow_dma_attr,
3260 		    sizeof (qlge->host_copy_shadow_dma_attr));
3261 
3262 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3263 		    "response shadow registers", __func__, qlge->instance);
3264 		return (DDI_FAILURE);
3265 	}
3266 
3267 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3268 
3269 	if (ql_alloc_phys(qlge->dip,
3270 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3271 	    &ql_desc_acc_attr,
3272 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3273 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3274 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3275 	    (size_t)4, /* 4 bytes alignment */
3276 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3277 	    &dma_cookie) != 0) {
3278 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3279 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3280 
3281 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3282 		    "for request shadow registers",
3283 		    __func__, qlge->instance);
3284 		goto err_wqp_sh_area;
3285 	}
3286 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3287 
3288 	return (