1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 QLogic Corporation. All rights reserved.
24  */
25 
26 #include <qlge.h>
27 #include <sys/atomic.h>
28 #include <sys/strsubr.h>
29 #include <sys/pattr.h>
30 #include <netinet/in.h>
31 #include <netinet/ip.h>
32 #include <netinet/ip6.h>
33 #include <netinet/tcp.h>
34 #include <netinet/udp.h>
35 #include <inet/ip.h>
36 
37 
38 
39 /*
40  * Local variables
41  */
42 static struct ether_addr ql_ether_broadcast_addr =
43 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
44 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
45 
46 /*
47  * Local function prototypes
48  */
49 static void ql_free_resources(qlge_t *);
50 static void ql_fini_kstats(qlge_t *);
51 static uint32_t ql_get_link_state(qlge_t *);
52 static void ql_read_conf(qlge_t *);
53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
54     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
55     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
56 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
57     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
58     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
59 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
60 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
61 static int ql_route_initialize(qlge_t *);
62 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
63 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
64 static int ql_bringdown_adapter(qlge_t *);
65 static int ql_bringup_adapter(qlge_t *);
66 static int ql_asic_reset(qlge_t *);
67 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
68 static void ql_stop_timer(qlge_t *qlge);
69 static void ql_fm_fini(qlge_t *qlge);
70 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
71 
/*
 * TX DMA mapping handles allow multiple scatter-gather lists
 */
75 ddi_dma_attr_t  tx_mapping_dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version */
77 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
78 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
79 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
80 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
81 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
82 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
83 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
84 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
85 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
86 	QL_DMA_GRANULARITY,		/* granularity of device */
87 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
88 };
89 
90 /*
91  * Receive buffers and Request/Response queues do not allow scatter-gather lists
92  */
93 ddi_dma_attr_t  dma_attr = {
94 	DMA_ATTR_V0,			/* dma_attr_version */
95 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
96 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
97 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
98 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
99 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
100 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
101 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
102 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e. no s/g list */
104 	QL_DMA_GRANULARITY,		/* granularity of device */
105 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
106 };
107 /*
108  * Receive buffers do not allow scatter-gather lists
109  */
110 ddi_dma_attr_t  dma_attr_rbuf = {
111 	DMA_ATTR_V0,			/* dma_attr_version */
112 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
113 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
114 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	0x1,				/* DMA address alignment, byte aligned */
116 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
117 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
118 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
119 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	1,				/* s/g list length, i.e. no s/g list */
121 	QL_DMA_GRANULARITY,		/* granularity of device */
122 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
123 };
124 /*
125  * DMA access attribute structure.
126  */
127 /* device register access from host */
128 ddi_device_acc_attr_t ql_dev_acc_attr = {
129 	DDI_DEVICE_ATTR_V0,
130 	DDI_STRUCTURE_LE_ACC,
131 	DDI_STRICTORDER_ACC
132 };
133 
134 /* host ring descriptors */
135 ddi_device_acc_attr_t ql_desc_acc_attr = {
136 	DDI_DEVICE_ATTR_V0,
137 	DDI_NEVERSWAP_ACC,
138 	DDI_STRICTORDER_ACC
139 };
140 
141 /* host ring buffer */
142 ddi_device_acc_attr_t ql_buf_acc_attr = {
143 	DDI_DEVICE_ATTR_V0,
144 	DDI_NEVERSWAP_ACC,
145 	DDI_STRICTORDER_ACC
146 };
147 
148 /*
149  * Hash key table for Receive Side Scaling (RSS) support
150  */
151 const uint8_t key_data[] = {
152 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
153 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
154 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
155 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
156 
157 /*
158  * Shadow Registers:
159  * Outbound queues have a consumer index that is maintained by the chip.
160  * Inbound queues have a producer index that is maintained by the chip.
161  * For lower overhead, these registers are "shadowed" to host memory
162  * which allows the device driver to track the queue progress without
163  * PCI reads. When an entry is placed on an inbound queue, the chip will
164  * update the relevant index register and then copy the value to the
165  * shadow register in host memory.
 * Currently, ql_read_sh_reg only reads the inbound queues' producer index.
167  */
168 
169 static inline unsigned int
170 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
171 {
172 	uint32_t rtn;
173 
174 	/* re-synchronize shadow prod index dma buffer before reading */
175 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
176 	    rx_ring->prod_idx_sh_reg_offset,
177 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
178 
179 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
180 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
181 
182 	return (rtn);
183 }
184 
185 /*
186  * Read 32 bit atomically
187  */
188 uint32_t
189 ql_atomic_read_32(volatile uint32_t *target)
190 {
191 	/*
	 * atomic_add_32_nv returns the new value after the add;
	 * since we add 0, we get the current value
194 	 */
195 	return (atomic_add_32_nv(target, 0));
196 }
197 
198 /*
199  * Set 32 bit atomically
200  */
201 void
202 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
203 {
204 	(void) atomic_swap_32(target, newval);
205 }
206 
207 
208 /*
209  * Setup device PCI configuration registers.
210  * Kernel context.
211  */
212 static void
213 ql_pci_config(qlge_t *qlge)
214 {
215 	uint16_t w;
216 
217 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
218 	    PCI_CONF_VENID);
219 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
220 	    PCI_CONF_DEVID);
221 
222 	/*
	 * We want to respect the framework's setting of the PCI
	 * configuration space command register and also
	 * make sure that all bits of interest to us
	 * are properly set in the PCI command register (0x04).
227 	 * PCI_COMM_IO		0x1	 I/O access enable
228 	 * PCI_COMM_MAE		0x2	 Memory access enable
229 	 * PCI_COMM_ME		0x4	 bus master enable
230 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
231 	 */
232 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
233 	w = (uint16_t)(w & (~PCI_COMM_IO));
234 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
235 	    /* PCI_COMM_MEMWR_INVAL | */
236 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
237 
238 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
239 
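	/*
	 * Config offset 0x54 is device specific.  The read-modify-write
	 * below programs bits 14:12 to 101b; if this is the PCIe Device
	 * Control register, that selects a 4096-byte maximum read request
	 * size.
	 */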
240 	w = pci_config_get16(qlge->pci_handle, 0x54);
241 	w = (uint16_t)(w & (~0x7000));
242 	w = (uint16_t)(w | 0x5000);
243 	pci_config_put16(qlge->pci_handle, 0x54, w);
244 
245 	ql_dump_pci_config(qlge);
246 }
247 
248 /*
 * This routine performs the necessary steps to set the GLD MAC information
 * such as the function number, XGMAC mask and shift bits
251  */
252 static int
253 ql_set_mac_info(qlge_t *qlge)
254 {
255 	uint32_t value;
256 	int rval = DDI_FAILURE;
257 	uint32_t fn0_net, fn1_net;
258 
259 	/* set default value */
260 	qlge->fn0_net = FN0_NET;
261 	qlge->fn1_net = FN1_NET;
262 
263 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
264 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
265 		    __func__, qlge->instance);
266 		goto exit;
267 	} else {
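		/*
		 * The MPI register carries the two NIC function numbers in
		 * bits 3:1 (fn0_net) and bits 7:5 (fn1_net).
		 */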
268 		fn0_net = (value >> 1) & 0x07;
269 		fn1_net = (value >> 5) & 0x07;
270 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x, "
			    "nic0 function number %d, "
			    "nic1 function number %d, "
			    "using defaults\n",
			    __func__, qlge->instance, value, fn0_net, fn1_net);
276 			goto exit;
277 		} else {
278 			qlge->fn0_net = fn0_net;
279 			qlge->fn1_net = fn1_net;
280 		}
281 	}
282 
283 	/* Get the function number that the driver is associated with */
284 	value = ql_read_reg(qlge, REG_STATUS);
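	/* bits 7:6 of the status register hold this PCI function's number */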
285 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
286 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
287 	    value, qlge->func_number));
288 
	/* Is the driver loaded on a non-NIC function? */
290 	if ((qlge->func_number != qlge->fn0_net) &&
291 	    (qlge->func_number != qlge->fn1_net)) {
292 		cmn_err(CE_WARN,
293 		    "Invalid function number = 0x%x\n", qlge->func_number);
294 		goto exit;
295 	}
296 	/* network port 0? */
297 	if (qlge->func_number == qlge->fn0_net) {
298 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
299 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
300 	} else {
301 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
302 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
303 	}
304 	rval = DDI_SUCCESS;
305 exit:
306 	return (rval);
307 
308 }
309 
310 /*
311  * write to doorbell register
312  */
313 void
314 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
315 {
316 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
317 }
318 
319 /*
320  * read from doorbell register
321  */
322 uint32_t
323 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
324 {
325 	uint32_t ret;
326 
327 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
328 
329 	return	(ret);
330 }
331 
332 /*
 * This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used from kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
337  */
338 static int
339 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
340 {
341 	uint32_t temp;
342 	int count = UDELAY_COUNT;
343 
344 	while (count) {
345 		temp = ql_read_reg(qlge, reg);
346 
347 		/* check for errors */
348 		if ((temp & err_bit) != 0) {
349 			break;
350 		} else if ((temp & bit) != 0)
351 			return (DDI_SUCCESS);
352 		qlge_delay(UDELAY_DELAY);
353 		count--;
354 	}
355 	cmn_err(CE_WARN,
356 	    "Waiting for reg %x to come ready failed.", reg);
357 	if (qlge->fm_enable) {
358 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
359 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
360 	}
361 	return (DDI_FAILURE);
362 }
363 
364 /*
365  * The CFG register is used to download TX and RX control blocks
366  * to the chip. This function waits for an operation to complete.
367  */
368 static int
369 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
370 {
371 	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
372 }
373 
374 
375 /*
376  * Used to issue init control blocks to hw. Maps control block,
377  * sets address, triggers download, waits for completion.
378  */
379 static int
380 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
381 {
382 	int status = DDI_SUCCESS;
383 	uint32_t mask;
384 	uint32_t value;
385 
386 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
387 	if (status != DDI_SUCCESS) {
388 		goto exit;
389 	}
390 	status = ql_wait_cfg(qlge, bit);
391 	if (status != DDI_SUCCESS) {
392 		goto exit;
393 	}
394 
395 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
396 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
397 
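	/*
	 * The upper 16 bits of the CONFIGURATION register appear to act as
	 * a write mask (the same pattern used for REG_INTERRUPT_ENABLE and
	 * REG_SEMAPHORE): only bits also set in the high half take effect
	 * in the low half.
	 */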
398 	mask = CFG_Q_MASK | (bit << 16);
399 	value = bit | (q_id << CFG_Q_SHIFT);
400 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
401 
402 	/*
403 	 * Wait for the bit to clear after signaling hw.
404 	 */
405 	status = ql_wait_cfg(qlge, bit);
406 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
407 
408 exit:
409 	return (status);
410 }
411 
412 /*
413  * Initialize adapter instance
414  */
415 static int
416 ql_init_instance(qlge_t *qlge)
417 {
418 	int i;
419 
420 	/* Default value */
421 	qlge->mac_flags = QL_MAC_INIT;
422 	qlge->mtu = ETHERMTU;		/* set normal size as default */
423 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
424 
425 	for (i = 0; i < MAX_RX_RINGS; i++) {
426 		qlge->rx_polls[i] = 0;
427 		qlge->rx_interrupts[i] = 0;
428 	}
429 
430 	/*
431 	 * Set up the operating parameters.
432 	 */
433 	qlge->multicast_list_count = 0;
434 
435 	/*
	 * Set up the maximum size of the unicast address list
437 	 */
438 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
439 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
440 
441 	/*
	 * read user-defined properties from the .conf file
443 	 */
444 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
445 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
446 
447 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
448 
	/* choose Memory Space mapping and get Vendor ID, Device ID etc. */
450 	ql_pci_config(qlge);
451 	qlge->ip_hdr_offset = 0;
452 
453 	if (qlge->device_id == 0x8000) {
454 		/* Schultz card */
455 		qlge->cfg_flags |= CFG_CHIP_8100;
456 		/* enable just ipv4 chksum offload for Schultz */
457 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
458 		/*
		 * Schultz firmware does not do the pseudo IP header checksum
		 * calculation; it must be done by the driver
461 		 */
462 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
463 		if (qlge->lso_enable)
464 			qlge->cfg_flags |= CFG_LSO;
465 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
466 		/* Schultz must split packet header */
467 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
468 		qlge->max_read_mbx = 5;
469 		qlge->ip_hdr_offset = 2;
470 	}
471 
472 	/* Set Function Number and some of the iocb mac information */
473 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
474 		return (DDI_FAILURE);
475 
476 	/* Read network settings from NVRAM */
477 	/* After nvram is read successfully, update dev_addr */
478 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
479 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
480 		for (i = 0; i < ETHERADDRL; i++) {
481 			qlge->dev_addr.ether_addr_octet[i] =
482 			    qlge->nic_config.factory_MAC[i];
483 		}
484 	} else {
485 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
486 		    __func__, qlge->instance);
487 		return (DDI_FAILURE);
488 	}
489 
490 	bcopy(qlge->dev_addr.ether_addr_octet,
491 	    qlge->unicst_addr[0].addr.ether_addr_octet,
492 	    ETHERADDRL);
493 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
494 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
495 
496 	qlge->port_link_state = LS_DOWN;
497 
498 	return (DDI_SUCCESS);
499 }
500 
501 
502 /*
503  * This hardware semaphore provides the mechanism for exclusive access to
504  * resources shared between the NIC driver, MPI firmware,
505  * FCOE firmware and the FC driver.
506  */
507 static int
508 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
509 {
510 	uint32_t sem_bits = 0;
511 
512 	switch (sem_mask) {
513 	case SEM_XGMAC0_MASK:
514 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
515 		break;
516 	case SEM_XGMAC1_MASK:
517 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
518 		break;
519 	case SEM_ICB_MASK:
520 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
521 		break;
522 	case SEM_MAC_ADDR_MASK:
523 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
524 		break;
525 	case SEM_FLASH_MASK:
526 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
527 		break;
528 	case SEM_PROBE_MASK:
529 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
530 		break;
531 	case SEM_RT_IDX_MASK:
532 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
533 		break;
534 	case SEM_PROC_REG_MASK:
535 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
536 		break;
537 	default:
		cmn_err(CE_WARN, "Bad semaphore mask!");
539 		return (DDI_FAILURE);
540 	}
541 
542 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
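	/*
	 * If the read-back shows our set-bit latched, the semaphore was
	 * granted; this then returns 0, which ql_sem_spinlock() treats as
	 * success.
	 */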
543 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
544 }
545 
546 /*
 * Lock a specific bit of the semaphore register to gain
 * access to a particular shared register
549  */
550 int
551 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
552 {
553 	unsigned int wait_count = 30;
554 
555 	while (wait_count) {
556 		if (!ql_sem_trylock(qlge, sem_mask))
557 			return (DDI_SUCCESS);
558 		qlge_delay(100);
559 		wait_count--;
560 	}
561 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
562 	    __func__, qlge->instance, sem_mask);
563 	return (DDI_FAILURE);
564 }
565 
566 /*
 * Unlock a specific bit of the semaphore register to release
 * access to a particular shared register
569  */
570 void
571 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
572 {
573 	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
574 	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
575 }
576 
577 /*
578  * Get property value from configuration file.
579  *
580  * string = property string pointer.
581  *
582  * Returns:
 * 0xFFFFFFFF if the property is not found, otherwise the property value.
584  */
585 static uint32_t
586 ql_get_prop(qlge_t *qlge, char *string)
587 {
588 	char buf[256];
589 	uint32_t data;
590 
591 	/* Get adapter instance parameter. */
592 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
593 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
594 	    (int)0xffffffff);
595 
596 	/* Adapter instance parameter found? */
597 	if (data == 0xffffffff) {
598 		/* No, get default parameter. */
599 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
600 		    string, (int)0xffffffff);
601 	}
602 
603 	return (data);
604 }
605 
606 /*
 * Read user settings from the configuration file.
608  */
609 static void
610 ql_read_conf(qlge_t *qlge)
611 {
612 	uint32_t data;
613 
614 	/* clear configuration flags */
615 	qlge->cfg_flags = 0;
616 
617 	/* Set up the default ring sizes. */
618 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
619 	data = ql_get_prop(qlge, "tx_ring_size");
620 	/* if data is valid */
621 	if ((data != 0xffffffff) && data) {
622 		if (qlge->tx_ring_size != data) {
623 			qlge->tx_ring_size = (uint16_t)data;
624 		}
625 	}
626 
627 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
628 	data = ql_get_prop(qlge, "rx_ring_size");
629 	/* if data is valid */
630 	if ((data != 0xffffffff) && data) {
631 		if (qlge->rx_ring_size != data) {
632 			qlge->rx_ring_size = (uint16_t)data;
633 		}
634 	}
635 
636 	qlge->tx_ring_count = 8;
637 	data = ql_get_prop(qlge, "tx_ring_count");
638 	/* if data is valid */
639 	if ((data != 0xffffffff) && data) {
640 		if (qlge->tx_ring_count != data) {
641 			qlge->tx_ring_count = (uint16_t)data;
642 		}
643 	}
644 
645 	qlge->rss_ring_count = 8;
646 	data = ql_get_prop(qlge, "rss_ring_count");
647 	/* if data is valid */
648 	if ((data != 0xffffffff) && data) {
649 		if (qlge->rss_ring_count != data) {
650 			qlge->rss_ring_count = (uint16_t)data;
651 		}
652 	}
653 
654 	/* Get default rx_copy enable/disable. */
655 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
656 	    data == 0) {
657 		qlge->rx_copy = B_FALSE;
658 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
659 	} else if (data == 1) {
660 		qlge->rx_copy = B_TRUE;
661 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
662 	}
663 
664 	qlge->rx_copy_threshold = qlge->rx_ring_size / 4;
665 	data = ql_get_prop(qlge, "rx_copy_threshold");
666 	if ((data != 0xffffffff) && (data != 0)) {
667 		qlge->rx_copy_threshold = data;
668 		cmn_err(CE_NOTE, "!new rx_copy_threshold %d \n",
669 		    qlge->rx_copy_threshold);
670 	}
671 
672 	/* Get mtu packet size. */
673 	data = ql_get_prop(qlge, "mtu");
674 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
675 		if (qlge->mtu != data) {
676 			qlge->mtu = data;
677 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
678 		}
679 	}
680 
681 	if (qlge->mtu == JUMBO_MTU) {
682 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
683 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
684 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
685 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
686 	}
687 
688 
689 	/* Get pause mode, default is Per Priority mode. */
690 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
691 	data = ql_get_prop(qlge, "pause");
692 	if (data <= PAUSE_MODE_PER_PRIORITY) {
693 		if (qlge->pause != data) {
694 			qlge->pause = data;
695 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
696 		}
697 	}
698 	/* Receive interrupt delay */
699 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
700 	data = ql_get_prop(qlge, "rx_intr_delay");
701 	/* if data is valid */
702 	if ((data != 0xffffffff) && data) {
703 		if (qlge->rx_coalesce_usecs != data) {
704 			qlge->rx_coalesce_usecs = (uint16_t)data;
705 		}
706 	}
707 	/* Rx inter-packet delay. */
708 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
709 	data = ql_get_prop(qlge, "rx_ipkt_delay");
710 	/* if data is valid */
711 	if ((data != 0xffffffff) && data) {
712 		if (qlge->rx_max_coalesced_frames != data) {
713 			qlge->rx_max_coalesced_frames = (uint16_t)data;
714 		}
715 	}
716 	/* Transmit interrupt delay */
717 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
718 	data = ql_get_prop(qlge, "tx_intr_delay");
719 	/* if data is valid */
720 	if ((data != 0xffffffff) && data) {
721 		if (qlge->tx_coalesce_usecs != data) {
722 			qlge->tx_coalesce_usecs = (uint16_t)data;
723 		}
724 	}
725 	/* Tx inter-packet delay. */
726 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
727 	data = ql_get_prop(qlge, "tx_ipkt_delay");
728 	/* if data is valid */
729 	if ((data != 0xffffffff) && data) {
730 		if (qlge->tx_max_coalesced_frames != data) {
731 			qlge->tx_max_coalesced_frames = (uint16_t)data;
732 		}
733 	}
734 
735 	/* Get split header payload_copy_thresh. */
736 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
737 	data = ql_get_prop(qlge, "payload_copy_thresh");
738 	/* if data is valid */
739 	if ((data != 0xffffffff) && (data != 0)) {
740 		if (qlge->payload_copy_thresh != data) {
741 			qlge->payload_copy_thresh = data;
742 		}
743 	}
744 
745 	/* large send offload (LSO) capability. */
746 	qlge->lso_enable = 1;
747 	data = ql_get_prop(qlge, "lso_enable");
748 	/* if data is valid */
749 	if ((data == 0) || (data == 1)) {
750 		if (qlge->lso_enable != data) {
751 			qlge->lso_enable = (uint16_t)data;
752 		}
753 	}
754 
755 	/* dcbx capability. */
756 	qlge->dcbx_enable = 1;
757 	data = ql_get_prop(qlge, "dcbx_enable");
758 	/* if data is valid */
759 	if ((data == 0) || (data == 1)) {
760 		if (qlge->dcbx_enable != data) {
761 			qlge->dcbx_enable = (uint16_t)data;
762 		}
763 	}
764 	/* fault management enable */
765 	qlge->fm_enable = B_TRUE;
766 	data = ql_get_prop(qlge, "fm-enable");
767 	if ((data == 0x1) || (data == 0)) {
768 		qlge->fm_enable = (boolean_t)data;
769 	}
770 
771 }
772 
773 /*
774  * Enable global interrupt
775  */
776 static void
777 ql_enable_global_interrupt(qlge_t *qlge)
778 {
779 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
780 	    (INTR_EN_EI << 16) | INTR_EN_EI);
781 	qlge->flags |= INTERRUPTS_ENABLED;
782 }
783 
784 /*
785  * Disable global interrupt
786  */
787 static void
788 ql_disable_global_interrupt(qlge_t *qlge)
789 {
790 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
791 	qlge->flags &= ~INTERRUPTS_ENABLED;
792 }
793 
794 /*
795  * Enable one ring interrupt
796  */
797 void
798 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
799 {
800 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
801 
802 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
803 	    __func__, qlge->instance, intr, ctx->irq_cnt));
804 
805 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
806 		/*
807 		 * Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
809 		 */
810 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
811 		return;
812 	}
813 
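	/* re-enable only when the outstanding disable count drops to zero */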
814 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
815 		mutex_enter(&qlge->hw_mutex);
816 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
817 		mutex_exit(&qlge->hw_mutex);
818 		QL_PRINT(DBG_INTR,
819 		    ("%s(%d): write %x to intr enable register \n",
820 		    __func__, qlge->instance, ctx->intr_en_mask));
821 	}
822 }
823 
824 /*
825  * ql_forced_disable_completion_interrupt
826  * Used by call from OS, may be called without
 * Called from the OS; may be called without
 * a pending interrupt, so force the disable
829 uint32_t
830 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
831 {
832 	uint32_t var = 0;
833 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
834 
835 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
836 	    __func__, qlge->instance, intr, ctx->irq_cnt));
837 
838 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
839 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
840 		var = ql_read_reg(qlge, REG_STATUS);
841 		return (var);
842 	}
843 
844 	mutex_enter(&qlge->hw_mutex);
845 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
846 	var = ql_read_reg(qlge, REG_STATUS);
847 	mutex_exit(&qlge->hw_mutex);
848 
849 	return (var);
850 }
851 
852 /*
853  * Disable a completion interrupt
854  */
855 void
856 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
857 {
858 	struct intr_ctx *ctx;
859 
860 	ctx = qlge->intr_ctx + intr;
861 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
862 	    __func__, qlge->instance, intr, ctx->irq_cnt));
863 	/*
864 	 * HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
866 	 */
867 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
868 		return;
869 
870 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
871 		mutex_enter(&qlge->hw_mutex);
872 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
873 		mutex_exit(&qlge->hw_mutex);
874 	}
875 	atomic_inc_32(&ctx->irq_cnt);
876 }
877 
878 /*
879  * Enable all completion interrupts
880  */
881 static void
882 ql_enable_all_completion_interrupts(qlge_t *qlge)
883 {
884 	int i;
885 	uint32_t value = 1;
886 
887 	for (i = 0; i < qlge->intr_cnt; i++) {
888 		/*
889 		 * Set the count to 1 for Legacy / MSI interrupts or for the
890 		 * default interrupt (0)
891 		 */
892 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
893 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
894 		}
895 		ql_enable_completion_interrupt(qlge, i);
896 	}
897 }
898 
899 /*
900  * Disable all completion interrupts
901  */
902 static void
903 ql_disable_all_completion_interrupts(qlge_t *qlge)
904 {
905 	int i;
906 	uint32_t value = 0;
907 
908 	for (i = 0; i < qlge->intr_cnt; i++) {
909 
910 		/*
911 		 * Set the count to 0 for Legacy / MSI interrupts or for the
912 		 * default interrupt (0)
913 		 */
914 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
915 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
916 
917 		ql_disable_completion_interrupt(qlge, i);
918 	}
919 }
920 
921 /*
922  * Update small buffer queue producer index
923  */
924 static void
925 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
926 {
927 	/* Update the buffer producer index */
928 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
929 	    rx_ring->sbq_prod_idx));
930 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
931 	    rx_ring->sbq_prod_idx);
932 }
933 
934 /*
935  * Update large buffer queue producer index
936  */
937 static void
938 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
939 {
940 	/* Update the buffer producer index */
941 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
942 	    rx_ring->lbq_prod_idx));
943 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
944 	    rx_ring->lbq_prod_idx);
945 }
946 
947 /*
 * Add a small buffer descriptor to the end of its in-use list;
 * assumes sbq_lock is already held
950  */
951 static void
952 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
953     struct bq_desc *sbq_desc)
954 {
955 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
956 
957 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
958 	inuse_idx++;
959 	if (inuse_idx >= rx_ring->sbq_len)
960 		inuse_idx = 0;
961 	rx_ring->sbq_use_tail = inuse_idx;
962 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
963 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
964 }
965 
966 /*
967  * Get a small buffer descriptor from its in use list
968  */
969 static struct bq_desc *
970 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
971 {
972 	struct bq_desc *sbq_desc = NULL;
973 	uint32_t inuse_idx;
974 
975 	/* Pick from head of in use list */
976 	inuse_idx = rx_ring->sbq_use_head;
977 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
978 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
979 
980 	if (sbq_desc != NULL) {
981 		inuse_idx++;
982 		if (inuse_idx >= rx_ring->sbq_len)
983 			inuse_idx = 0;
984 		rx_ring->sbq_use_head = inuse_idx;
985 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
986 		atomic_inc_32(&rx_ring->rx_indicate);
987 		sbq_desc->upl_inuse = 1;
988 		/* if mp is NULL */
989 		if (sbq_desc->mp == NULL) {
990 			/* try to remap mp again */
991 			sbq_desc->mp =
992 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
993 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
994 		}
995 	}
996 
997 	return (sbq_desc);
998 }
999 
1000 /*
1001  * Add a small buffer descriptor to its free list
1002  */
1003 static void
1004 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1005     struct bq_desc *sbq_desc)
1006 {
1007 	uint32_t free_idx;
1008 
1009 	/* Add to the end of free list */
1010 	free_idx = rx_ring->sbq_free_tail;
1011 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1012 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1013 	free_idx++;
1014 	if (free_idx >= rx_ring->sbq_len)
1015 		free_idx = 0;
1016 	rx_ring->sbq_free_tail = free_idx;
1017 	atomic_inc_32(&rx_ring->sbuf_free_count);
1018 }
1019 
1020 /*
1021  * Get a small buffer descriptor from its free list
1022  */
1023 static struct bq_desc *
1024 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1025 {
1026 	struct bq_desc *sbq_desc;
1027 	uint32_t free_idx;
1028 
1029 	free_idx = rx_ring->sbq_free_head;
1030 	/* Pick from top of free list */
1031 	sbq_desc = rx_ring->sbuf_free[free_idx];
1032 	rx_ring->sbuf_free[free_idx] = NULL;
1033 	if (sbq_desc != NULL) {
1034 		free_idx++;
1035 		if (free_idx >= rx_ring->sbq_len)
1036 			free_idx = 0;
1037 		rx_ring->sbq_free_head = free_idx;
1038 		atomic_dec_32(&rx_ring->sbuf_free_count);
1039 	}
1040 	return (sbq_desc);
1041 }
1042 
1043 /*
1044  * Add a large buffer descriptor to its in use list
1045  */
1046 static void
1047 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1048     struct bq_desc *lbq_desc)
1049 {
1050 	uint32_t inuse_idx;
1051 
1052 	inuse_idx = rx_ring->lbq_use_tail;
1053 
1054 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1055 	inuse_idx++;
1056 	if (inuse_idx >= rx_ring->lbq_len)
1057 		inuse_idx = 0;
1058 	rx_ring->lbq_use_tail = inuse_idx;
1059 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1060 }
1061 
1062 /*
1063  * Get a large buffer descriptor from in use list
1064  */
1065 static struct bq_desc *
1066 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1067 {
1068 	struct bq_desc *lbq_desc;
1069 	uint32_t inuse_idx;
1070 
1071 	/* Pick from head of in use list */
1072 	inuse_idx = rx_ring->lbq_use_head;
1073 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1074 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1075 
1076 	if (lbq_desc != NULL) {
1077 		inuse_idx++;
1078 		if (inuse_idx >= rx_ring->lbq_len)
1079 			inuse_idx = 0;
1080 		rx_ring->lbq_use_head = inuse_idx;
1081 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1082 		atomic_inc_32(&rx_ring->rx_indicate);
1083 		lbq_desc->upl_inuse = 1;
1084 
1085 		/* if mp is NULL */
1086 		if (lbq_desc->mp == NULL) {
1087 			/* try to remap mp again */
1088 			lbq_desc->mp =
1089 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1090 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1091 		}
1092 	}
1093 	return (lbq_desc);
1094 }
1095 
1096 /*
1097  * Add a large buffer descriptor to free list
1098  */
1099 static void
1100 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1101     struct bq_desc *lbq_desc)
1102 {
1103 	uint32_t free_idx;
1104 
1105 	/* Add to the end of free list */
1106 	free_idx = rx_ring->lbq_free_tail;
1107 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1108 	free_idx++;
1109 	if (free_idx >= rx_ring->lbq_len)
1110 		free_idx = 0;
1111 	rx_ring->lbq_free_tail = free_idx;
1112 	atomic_inc_32(&rx_ring->lbuf_free_count);
1113 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1114 }
1115 
1116 /*
1117  * Get a large buffer descriptor from its free list
1118  */
1119 static struct bq_desc *
1120 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1121 {
1122 	struct bq_desc *lbq_desc;
1123 	uint32_t free_idx;
1124 
1125 	free_idx = rx_ring->lbq_free_head;
1126 	/* Pick from head of free list */
1127 	lbq_desc = rx_ring->lbuf_free[free_idx];
1128 	rx_ring->lbuf_free[free_idx] = NULL;
1129 
1130 	if (lbq_desc != NULL) {
1131 		free_idx++;
1132 		if (free_idx >= rx_ring->lbq_len)
1133 			free_idx = 0;
1134 		rx_ring->lbq_free_head = free_idx;
1135 		atomic_dec_32(&rx_ring->lbuf_free_count);
1136 	}
1137 	return (lbq_desc);
1138 }
1139 
1140 /*
 * Return a small buffer descriptor to its free list and re-arm the
 * hardware small buffer queue when enough buffers are free
1142  */
1143 static void
1144 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1145 {
1146 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1147 	uint64_t *sbq_entry;
1148 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1149 	/*
1150 	 * Sync access
1151 	 */
1152 	mutex_enter(&rx_ring->sbq_lock);
1153 
1154 	sbq_desc->upl_inuse = 0;
1155 
1156 	/*
1157 	 * If we are freeing the buffers as a result of adapter unload, get out
1158 	 */
1159 	if ((sbq_desc->free_buf != NULL) ||
1160 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1161 		if (sbq_desc->free_buf == NULL)
1162 			atomic_dec_32(&rx_ring->rx_indicate);
1163 		mutex_exit(&rx_ring->sbq_lock);
1164 		return;
1165 	}
1166 #ifdef QLGE_LOAD_UNLOAD
1167 	if (rx_ring->rx_indicate == 0)
1168 		cmn_err(CE_WARN, "sbq: indicate wrong");
1169 #endif
1170 #ifdef QLGE_TRACK_BUFFER_USAGE
1171 	uint32_t sb_consumer_idx;
1172 	uint32_t sb_producer_idx;
1173 	uint32_t num_free_buffers;
1174 	uint32_t temp;
1175 
1176 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1177 	sb_producer_idx = temp & 0x0000ffff;
1178 	sb_consumer_idx = (temp >> 16);
1179 
1180 	if (sb_consumer_idx > sb_producer_idx)
1181 		num_free_buffers = NUM_SMALL_BUFFERS -
1182 		    (sb_consumer_idx - sb_producer_idx);
1183 	else
1184 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1185 
1186 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1187 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1188 
1189 #endif
1190 
1191 #ifdef QLGE_LOAD_UNLOAD
1192 	if (rx_ring->rx_indicate > 0xFF000000)
1193 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1194 		    " sbq_desc index %d.",
1195 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1196 		    sbq_desc->index);
1197 #endif
1198 	if (alloc_memory) {
1199 		sbq_desc->mp =
1200 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1201 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1202 		if (sbq_desc->mp == NULL) {
1203 			rx_ring->rx_failed_sbq_allocs++;
1204 		}
1205 	}
1206 
	/* Got the packet back from the stack; decrement rx_indicate */
1208 	atomic_dec_32(&rx_ring->rx_indicate);
1209 
1210 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1211 
1212 	/* Rearm if possible */
1213 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1214 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1215 		sbq_entry = rx_ring->sbq_dma.vaddr;
1216 		sbq_entry += rx_ring->sbq_prod_idx;
1217 
1218 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1219 			/* Get first one from free list */
1220 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1221 
1222 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1223 			sbq_entry++;
1224 			rx_ring->sbq_prod_idx++;
1225 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1226 				rx_ring->sbq_prod_idx = 0;
1227 				sbq_entry = rx_ring->sbq_dma.vaddr;
1228 			}
1229 			/* Add to end of in use list */
1230 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1231 		}
1232 
1233 		/* Update small buffer queue producer index */
1234 		ql_update_sbq_prod_idx(qlge, rx_ring);
1235 	}
1236 
1237 	mutex_exit(&rx_ring->sbq_lock);
1238 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1239 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1240 }
1241 
1242 /*
 * rx recycle callback function
1244  */
1245 static void
1246 ql_release_to_sbuf_free_list(caddr_t p)
1247 {
1248 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1249 
1250 	if (sbq_desc == NULL)
1251 		return;
1252 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1253 }
1254 
1255 /*
 * Return a large buffer descriptor to its free list and re-arm the
 * hardware large buffer queue when enough buffers are free
1257  */
1258 static void
1259 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1260 {
1261 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1262 	uint64_t *lbq_entry;
1263 	qlge_t *qlge = rx_ring->qlge;
1264 
1265 	/* Sync access */
1266 	mutex_enter(&rx_ring->lbq_lock);
1267 
1268 	lbq_desc->upl_inuse = 0;
1269 	/*
1270 	 * If we are freeing the buffers as a result of adapter unload, get out
1271 	 */
1272 	if ((lbq_desc->free_buf != NULL) ||
1273 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1274 		if (lbq_desc->free_buf == NULL)
1275 			atomic_dec_32(&rx_ring->rx_indicate);
1276 		mutex_exit(&rx_ring->lbq_lock);
1277 		return;
1278 	}
1279 #ifdef QLGE_LOAD_UNLOAD
1280 	if (rx_ring->rx_indicate == 0)
1281 		cmn_err(CE_WARN, "lbq: indicate wrong");
1282 #endif
1283 #ifdef QLGE_TRACK_BUFFER_USAGE
1284 	uint32_t lb_consumer_idx;
1285 	uint32_t lb_producer_idx;
1286 	uint32_t num_free_buffers;
1287 	uint32_t temp;
1288 
1289 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1290 
1291 	lb_producer_idx = temp & 0x0000ffff;
1292 	lb_consumer_idx = (temp >> 16);
1293 
1294 	if (lb_consumer_idx > lb_producer_idx)
1295 		num_free_buffers = NUM_LARGE_BUFFERS -
1296 		    (lb_consumer_idx - lb_producer_idx);
1297 	else
1298 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1299 
1300 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1301 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1302 	}
1303 #endif
1304 
1305 #ifdef QLGE_LOAD_UNLOAD
1306 	if (rx_ring->rx_indicate > 0xFF000000)
1307 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
		    " lbq_desc index %d.",
1309 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1310 		    lbq_desc->index);
1311 #endif
1312 	if (alloc_memory) {
1313 		lbq_desc->mp =
1314 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1315 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1316 		if (lbq_desc->mp == NULL) {
1317 			rx_ring->rx_failed_lbq_allocs++;
1318 		}
1319 	}
1320 
	/* Got the packet back from the stack; decrement rx_indicate */
1322 	atomic_dec_32(&rx_ring->rx_indicate);
1323 
1324 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1325 
1326 	/* Rearm if possible */
1327 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1328 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1329 		lbq_entry = rx_ring->lbq_dma.vaddr;
1330 		lbq_entry += rx_ring->lbq_prod_idx;
1331 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1332 			/* Get first one from free list */
1333 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1334 
1335 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1336 			lbq_entry++;
1337 			rx_ring->lbq_prod_idx++;
1338 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1339 				rx_ring->lbq_prod_idx = 0;
1340 				lbq_entry = rx_ring->lbq_dma.vaddr;
1341 			}
1342 
1343 			/* Add to end of in use list */
1344 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1345 		}
1346 
1347 		/* Update large buffer queue producer index */
1348 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1349 	}
1350 
1351 	mutex_exit(&rx_ring->lbq_lock);
	QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1353 	    __func__, rx_ring->lbuf_free_count));
1354 }

/*
 * rx recycle callback function
1357  */
1358 static void
1359 ql_release_to_lbuf_free_list(caddr_t p)
1360 {
1361 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1362 
1363 	if (lbq_desc == NULL)
1364 		return;
1365 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1366 }
1367 
1368 /*
1369  * free small buffer queue buffers
1370  */
1371 static void
1372 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1373 {
1374 	struct bq_desc *sbq_desc;
1375 	uint32_t i;
1376 	uint32_t j = rx_ring->sbq_free_head;
1377 	int  force_cnt = 0;
1378 
1379 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1380 		sbq_desc = rx_ring->sbuf_free[j];
1381 		sbq_desc->free_buf = 1;
1382 		j++;
1383 		if (j >= rx_ring->sbq_len) {
1384 			j = 0;
1385 		}
1386 		if (sbq_desc->mp != NULL) {
1387 			freemsg(sbq_desc->mp);
1388 			sbq_desc->mp = NULL;
1389 		}
1390 	}
1391 	rx_ring->sbuf_free_count = 0;
1392 
1393 	j = rx_ring->sbq_use_head;
1394 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1395 		sbq_desc = rx_ring->sbuf_in_use[j];
1396 		sbq_desc->free_buf = 1;
1397 		j++;
1398 		if (j >= rx_ring->sbq_len) {
1399 			j = 0;
1400 		}
1401 		if (sbq_desc->mp != NULL) {
1402 			freemsg(sbq_desc->mp);
1403 			sbq_desc->mp = NULL;
1404 		}
1405 	}
1406 	rx_ring->sbuf_in_use_count = 0;
1407 
1408 	sbq_desc = &rx_ring->sbq_desc[0];
1409 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1410 		/*
1411 		 * Set flag so that the callback does not allocate a new buffer
1412 		 */
1413 		sbq_desc->free_buf = 1;
1414 		if (sbq_desc->upl_inuse != 0) {
1415 			force_cnt++;
1416 		}
1417 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1418 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1419 			    &sbq_desc->bd_dma.acc_handle);
1420 			sbq_desc->bd_dma.dma_handle = NULL;
1421 			sbq_desc->bd_dma.acc_handle = NULL;
1422 		}
1423 	}
1424 #ifdef QLGE_LOAD_UNLOAD
1425 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1426 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1427 #endif
1428 	if (rx_ring->sbuf_in_use != NULL) {
1429 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1430 		    sizeof (struct bq_desc *)));
1431 		rx_ring->sbuf_in_use = NULL;
1432 	}
1433 
1434 	if (rx_ring->sbuf_free != NULL) {
1435 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1436 		    sizeof (struct bq_desc *)));
1437 		rx_ring->sbuf_free = NULL;
1438 	}
1439 }
1440 
1441 /* Allocate small buffers */
1442 static int
1443 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1444 {
1445 	struct bq_desc *sbq_desc;
1446 	int i;
1447 	ddi_dma_cookie_t dma_cookie;
1448 
1449 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1450 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1451 	if (rx_ring->sbuf_free == NULL) {
1452 		cmn_err(CE_WARN,
1453 		    "!%s: sbuf_free_list alloc: failed",
1454 		    __func__);
1455 		rx_ring->sbuf_free_count = 0;
1456 		goto alloc_sbuf_err;
1457 	}
1458 
1459 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1460 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1461 	if (rx_ring->sbuf_in_use == NULL) {
1462 		cmn_err(CE_WARN,
1463 		    "!%s: sbuf_inuse_list alloc: failed",
1464 		    __func__);
1465 		rx_ring->sbuf_in_use_count = 0;
1466 		goto alloc_sbuf_err;
1467 	}
1468 	rx_ring->sbq_use_head = 0;
1469 	rx_ring->sbq_use_tail = 0;
1470 	rx_ring->sbq_free_head = 0;
1471 	rx_ring->sbq_free_tail = 0;
1472 	sbq_desc = &rx_ring->sbq_desc[0];
1473 
1474 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1475 		/* Allocate buffer */
1476 		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1477 		    &ql_buf_acc_attr,
1478 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1479 		    &sbq_desc->bd_dma.acc_handle,
1480 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1481 		    (size_t)0,				/* default alignment */
1482 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1483 		    &dma_cookie) != 0) {
1484 			cmn_err(CE_WARN,
1485 			    "!%s: ddi_dma_alloc_handle: failed",
1486 			    __func__);
1487 			goto alloc_sbuf_err;
1488 		}
1489 
1490 		/* Set context for Return buffer callback */
1491 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1492 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1493 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1494 		sbq_desc->rx_ring = rx_ring;
1495 		sbq_desc->upl_inuse = 0;
1496 		sbq_desc->free_buf = 0;
1497 
1498 		sbq_desc->mp =
1499 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1500 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1501 		if (sbq_desc->mp == NULL) {
1502 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1503 			goto alloc_sbuf_err;
1504 		}
1505 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1506 	}
1507 
1508 	return (DDI_SUCCESS);
1509 
1510 alloc_sbuf_err:
1511 	ql_free_sbq_buffers(rx_ring);
1512 	return (DDI_FAILURE);
1513 }
1514 
1515 static void
1516 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1517 {
1518 	struct bq_desc *lbq_desc;
1519 	uint32_t i, j;
1520 	int force_cnt = 0;
1521 
1522 	j = rx_ring->lbq_free_head;
1523 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1524 		lbq_desc = rx_ring->lbuf_free[j];
1525 		lbq_desc->free_buf = 1;
1526 		j++;
1527 		if (j >= rx_ring->lbq_len)
1528 			j = 0;
1529 		if (lbq_desc->mp != NULL) {
1530 			freemsg(lbq_desc->mp);
1531 			lbq_desc->mp = NULL;
1532 		}
1533 	}
1534 	rx_ring->lbuf_free_count = 0;
1535 
1536 	j = rx_ring->lbq_use_head;
1537 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1538 		lbq_desc = rx_ring->lbuf_in_use[j];
1539 		lbq_desc->free_buf = 1;
1540 		j++;
1541 		if (j >= rx_ring->lbq_len) {
1542 			j = 0;
1543 		}
1544 		if (lbq_desc->mp != NULL) {
1545 			freemsg(lbq_desc->mp);
1546 			lbq_desc->mp = NULL;
1547 		}
1548 	}
1549 	rx_ring->lbuf_in_use_count = 0;
1550 
1551 	lbq_desc = &rx_ring->lbq_desc[0];
1552 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1553 		/* Set flag so that callback will not allocate a new buffer */
1554 		lbq_desc->free_buf = 1;
1555 		if (lbq_desc->upl_inuse != 0) {
1556 			force_cnt++;
1557 		}
1558 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1559 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1560 			    &lbq_desc->bd_dma.acc_handle);
1561 			lbq_desc->bd_dma.dma_handle = NULL;
1562 			lbq_desc->bd_dma.acc_handle = NULL;
1563 		}
1564 	}
1565 #ifdef QLGE_LOAD_UNLOAD
1566 	if (force_cnt) {
1567 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1568 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1569 		    force_cnt);
1570 	}
1571 #endif
1572 	if (rx_ring->lbuf_in_use != NULL) {
1573 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1574 		    sizeof (struct bq_desc *)));
1575 		rx_ring->lbuf_in_use = NULL;
1576 	}
1577 
1578 	if (rx_ring->lbuf_free != NULL) {
1579 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1580 		    sizeof (struct bq_desc *)));
1581 		rx_ring->lbuf_free = NULL;
1582 	}
1583 }
1584 
1585 /* Allocate large buffers */
1586 static int
1587 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1588 {
1589 	struct bq_desc *lbq_desc;
1590 	ddi_dma_cookie_t dma_cookie;
1591 	int i;
1592 	uint32_t lbq_buf_size;
1593 
1594 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1595 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1596 	if (rx_ring->lbuf_free == NULL) {
1597 		cmn_err(CE_WARN,
1598 		    "!%s: lbuf_free_list alloc: failed",
1599 		    __func__);
1600 		rx_ring->lbuf_free_count = 0;
1601 		goto alloc_lbuf_err;
1602 	}
1603 
1604 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1605 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1606 
1607 	if (rx_ring->lbuf_in_use == NULL) {
1608 		cmn_err(CE_WARN,
1609 		    "!%s: lbuf_inuse_list alloc: failed",
1610 		    __func__);
1611 		rx_ring->lbuf_in_use_count = 0;
1612 		goto alloc_lbuf_err;
1613 	}
1614 	rx_ring->lbq_use_head = 0;
1615 	rx_ring->lbq_use_tail = 0;
1616 	rx_ring->lbq_free_head = 0;
1617 	rx_ring->lbq_free_tail = 0;
1618 
1619 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1620 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
1621 
1622 	lbq_desc = &rx_ring->lbq_desc[0];
1623 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1624 		rx_ring->lbq_buf_size = lbq_buf_size;
1625 		/* Allocate buffer */
1626 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1627 		    &ql_buf_acc_attr,
1628 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1629 		    &lbq_desc->bd_dma.acc_handle,
1630 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1631 		    (size_t)0, /* default alignment */
1632 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1633 		    &dma_cookie) != 0) {
1634 			cmn_err(CE_WARN,
1635 			    "!%s: ddi_dma_alloc_handle: failed",
1636 			    __func__);
1637 			goto alloc_lbuf_err;
1638 		}
1639 
1640 		/* Set context for Return buffer callback */
1641 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1642 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1643 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1644 		lbq_desc->rx_ring = rx_ring;
1645 		lbq_desc->upl_inuse = 0;
1646 		lbq_desc->free_buf = 0;
1647 
1648 		lbq_desc->mp =
1649 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1650 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1651 		if (lbq_desc->mp == NULL) {
1652 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1653 			goto alloc_lbuf_err;
1654 		}
1655 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1656 	} /* For all large buffers */
1657 
1658 	return (DDI_SUCCESS);
1659 
1660 alloc_lbuf_err:
1661 	ql_free_lbq_buffers(rx_ring);
1662 	return (DDI_FAILURE);
1663 }
1664 
1665 /*
1666  * Free rx buffers
1667  */
1668 static void
1669 ql_free_rx_buffers(qlge_t *qlge)
1670 {
1671 	int i;
1672 	struct rx_ring *rx_ring;
1673 
1674 	for (i = 0; i < qlge->rx_ring_count; i++) {
1675 		rx_ring = &qlge->rx_ring[i];
1676 		if (rx_ring->type != TX_Q) {
1677 			ql_free_lbq_buffers(rx_ring);
1678 			ql_free_sbq_buffers(rx_ring);
1679 		}
1680 	}
1681 }
1682 
1683 /*
1684  * Allocate rx buffers
1685  */
1686 static int
1687 ql_alloc_rx_buffers(qlge_t *qlge)
1688 {
1689 	struct rx_ring *rx_ring;
1690 	int i;
1691 
1692 	for (i = 0; i < qlge->rx_ring_count; i++) {
1693 		rx_ring = &qlge->rx_ring[i];
1694 		if (rx_ring->type != TX_Q) {
1695 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1696 				goto alloc_err;
1697 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1698 				goto alloc_err;
1699 		}
1700 	}
1701 #ifdef QLGE_TRACK_BUFFER_USAGE
1702 	for (i = 0; i < qlge->rx_ring_count; i++) {
1703 		if (qlge->rx_ring[i].type == RX_Q) {
1704 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1705 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1706 		}
1707 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1708 	}
1709 #endif
1710 	return (DDI_SUCCESS);
1711 
1712 alloc_err:
1713 
1714 	return (DDI_FAILURE);
1715 }
1716 
1717 /*
1718  * Initialize large buffer queue ring
1719  */
1720 static void
1721 ql_init_lbq_ring(struct rx_ring *rx_ring)
1722 {
1723 	uint16_t i;
1724 	struct bq_desc *lbq_desc;
1725 
1726 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1727 	for (i = 0; i < rx_ring->lbq_len; i++) {
1728 		lbq_desc = &rx_ring->lbq_desc[i];
1729 		lbq_desc->index = i;
1730 	}
1731 }
1732 
1733 /*
1734  * Initialize small buffer queue ring
1735  */
1736 static void
1737 ql_init_sbq_ring(struct rx_ring *rx_ring)
1738 {
1739 	uint16_t i;
1740 	struct bq_desc *sbq_desc;
1741 
1742 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1743 	for (i = 0; i < rx_ring->sbq_len; i++) {
1744 		sbq_desc = &rx_ring->sbq_desc[i];
1745 		sbq_desc->index = i;
1746 	}
1747 }
1748 
1749 /*
 * Calculate the pseudo-header checksum when the hardware cannot do it
1751  */
1752 static void
1753 ql_pseudo_cksum(uint8_t *buf)
1754 {
1755 	uint32_t cksum;
1756 	uint16_t iphl;
1757 	uint16_t proto;
1758 
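	/*
	 * Sum the IPv4 pseudo-header fields straight from the header bytes:
	 * the TCP/UDP length (total length at bytes 2-3 minus the IP header
	 * length), the protocol (byte 9) and the source and destination
	 * addresses (bytes 12-15 and 16-19).  The protocol byte is also
	 * saved to locate the TCP or UDP checksum field, which is seeded
	 * with the folded sum below.
	 */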
1759 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1760 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1761 	cksum += proto = buf[9];
1762 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1763 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1764 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1765 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1766 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1767 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1768 
1769 	/*
1770 	 * Point it to the TCP/UDP header, and
1771 	 * update the checksum field.
1772 	 */
1773 	buf += iphl + ((proto == IPPROTO_TCP) ?
1774 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1775 
1776 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1777 
1778 }
1779 
1780 /*
 * Transmit the packets passed down from the stack.
1782  */
1783 mblk_t *
1784 ql_ring_tx(void *arg, mblk_t *mp)
1785 {
1786 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1787 	qlge_t *qlge = tx_ring->qlge;
1788 	mblk_t *next;
1789 	int rval;
1790 	uint32_t tx_count = 0;
1791 
1792 	if (qlge->port_link_state == LS_DOWN) {
		/* cannot send while the link is down */
1794 		mblk_t *tp;
1795 
1796 		while (mp != NULL) {
1797 			tp = mp->b_next;
1798 			mp->b_next = NULL;
1799 			freemsg(mp);
1800 			mp = tp;
1801 		}
1802 		goto exit;
1803 	}
1804 
1805 	mutex_enter(&tx_ring->tx_lock);
	/* if the MAC is not started, the driver is not ready to send */
1807 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
		cmn_err(CE_WARN, "%s(%d) ring not started, mode %d,"
		    " returning packets",
1810 		    __func__, qlge->instance, tx_ring->mac_flags);
1811 		mutex_exit(&tx_ring->tx_lock);
1812 		goto exit;
1813 	}
1814 
1815 	/* we must try to send all */
1816 	while (mp != NULL) {
1817 		/*
1818 		 * if number of available slots is less than a threshold,
1819 		 * then quit
1820 		 */
1821 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1822 			tx_ring->queue_stopped = 1;
1823 			rval = DDI_FAILURE;
1824 #ifdef QLGE_LOAD_UNLOAD
1825 			cmn_err(CE_WARN, "%s(%d) no resources",
1826 			    __func__, qlge->instance);
1827 #endif
1828 			tx_ring->defer++;
1829 			/*
			 * If we return the buffers, we are expected to call
			 * mac_tx_ring_update() when resources become available
1832 			 */
1833 			break;
1834 		}
1835 
1836 		next = mp->b_next;
1837 		mp->b_next = NULL;
1838 
1839 		rval = ql_send_common(tx_ring, mp);
1840 
1841 		if (rval != DDI_SUCCESS) {
1842 			mp->b_next = next;
1843 			break;
1844 		}
1845 		tx_count++;
1846 		mp = next;
1847 	}
1848 
1849 	/*
1850 	 * After all msg blocks are mapped or copied to tx buffer,
1851 	 * trigger the hardware to send!
1852 	 */
1853 	if (tx_count > 0) {
1854 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1855 		    tx_ring->prod_idx);
1856 	}
1857 
1858 	mutex_exit(&tx_ring->tx_lock);
1859 exit:
1860 	return (mp);
1861 }
1862 
1863 
1864 /*
1865  * This function builds an mblk list for the given inbound
1866  * completion.
1867  */
1868 
1869 static mblk_t *
1870 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1871     struct ib_mac_iocb_rsp *ib_mac_rsp)
1872 {
1873 	mblk_t *mp = NULL;
1874 	mblk_t *mp1 = NULL;	/* packet header */
1875 	mblk_t *mp2 = NULL;	/* packet content */
1876 	struct bq_desc *lbq_desc;
1877 	struct bq_desc *sbq_desc;
1878 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1879 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1880 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1881 	uint32_t pkt_len = payload_len + header_len;
1882 	uint32_t done;
1883 	uint64_t *curr_ial_ptr;
1884 	uint32_t ial_data_addr_low;
1885 	uint32_t actual_data_addr_low;
1886 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1887 	uint32_t size;
1888 	uint32_t cp_offset;
1889 	boolean_t rx_copy = B_FALSE;
1890 	mblk_t *tp = NULL;
1891 
1892 	/*
1893 	 * Check if error flags are set
1894 	 */
1895 	if (err_flag != 0) {
1896 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1897 			rx_ring->frame_too_long++;
1898 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1899 			rx_ring->frame_too_short++;
1900 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1901 			rx_ring->fcs_err++;
1902 #ifdef QLGE_LOAD_UNLOAD
1903 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1904 #endif
1905 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1906 		    (uint8_t *)ib_mac_rsp, 8,
1907 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1908 	}
1909 
1910 	/* header should not be in large buffer */
1911 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1912 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1913 		err_flag |= 1;
1914 	}
	/* if the whole packet is bigger than the rx buffer size */
	if (pkt_len > qlge->max_frame_size) {
		cmn_err(CE_WARN, "ql_build_rx_mp: frame too long (%d)!",
		    pkt_len);
1918 		err_flag |= 1;
1919 	}
1920 	if (qlge->rx_copy ||
1921 	    (rx_ring->sbuf_in_use_count <= qlge->rx_copy_threshold) ||
1922 	    (rx_ring->lbuf_in_use_count <= qlge->rx_copy_threshold)) {
1923 		rx_copy = B_TRUE;
1924 	}
1925 
1926 	/* if using rx copy mode, we need to allocate a big enough buffer */
1927 	if (rx_copy) {
1928 		qlge->stats.norcvbuf++;
1929 		tp = allocb(payload_len + header_len + qlge->ip_hdr_offset,
1930 		    BPRI_MED);
1931 		if (tp == NULL) {
1932 			cmn_err(CE_WARN, "rx copy failed to allocate memory");
1933 		} else {
1934 			tp->b_rptr += qlge->ip_hdr_offset;
1935 		}
1936 	}
1937 	/*
1938 	 * Handle the header buffer if present.
1939 	 * packet header must be valid and saved in one small buffer
1940 	 * broadcast/multicast packets' headers not splitted
1941 	 */
1942 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1943 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1944 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1945 		    header_len));
		/* get the small buffer that holds the packet header */
1947 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1948 
1949 		ASSERT(sbq_desc != NULL);
1950 
1951 		/*
1952 		 * Validate addresses from the ASIC with the
1953 		 * expected sbuf address
1954 		 */
1955 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1956 		    != ib_mac_rsp->hdr_addr) {
1957 			/* Small buffer address mismatch */
1958 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1959 			    " in wrong small buffer",
1960 			    __func__, qlge->instance, rx_ring->cq_id);
1961 			goto fatal_error;
1962 		}
1963 		/* get this packet */
1964 		mp1 = sbq_desc->mp;
1965 		/* Flush DMA'd data */
1966 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1967 		    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1968 
		if ((err_flag != 0) || (mp1 == NULL)) {
1970 			/* failed on this packet, put it back for re-arming */
1971 #ifdef QLGE_LOAD_UNLOAD
1972 			cmn_err(CE_WARN, "get header from small buffer fail");
1973 #endif
1974 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1975 			mp1 = NULL;
1976 		} else if (rx_copy) {
1977 			if (tp != NULL) {
1978 				bcopy(sbq_desc->bd_dma.vaddr, tp->b_rptr,
1979 				    header_len);
1980 			}
1981 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1982 			mp1 = NULL;
1983 		} else {
			if ((qlge->ip_hdr_offset != 0) &&
			    (header_len < SMALL_BUFFER_SIZE)) {
				/*
				 * copy the entire header to a 2-byte aligned
				 * address for 8100 adapters so that the IP
				 * header lands on a 4-byte boundary
				 */
1991 				bcopy(mp1->b_rptr,
1992 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1993 				    qlge->ip_hdr_offset),
1994 				    header_len);
1995 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1996 				    qlge->ip_hdr_offset;
1997 			}
1998 
1999 			/*
2000 			 * Adjust the mp payload_len to match
2001 			 * the packet header payload_len
2002 			 */
2003 			mp1->b_wptr = mp1->b_rptr + header_len;
2004 			mp1->b_next = mp1->b_cont = NULL;
2005 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
2006 			    (uint8_t *)mp1->b_rptr, 8, header_len);
2007 		}
2008 	}
2009 
2010 	/*
2011 	 * packet data or whole packet can be in small or one or
2012 	 * several large buffer(s)
2013 	 */
2014 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2015 		/*
2016 		 * The data is in a single small buffer.
2017 		 */
2018 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2019 
2020 		ASSERT(sbq_desc != NULL);
2021 
2022 		QL_PRINT(DBG_RX,
2023 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
2024 		    "sbq_desc->bd_dma.dma_addr = %x,"
2025 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2026 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
2027 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2028 
2029 		/*
2030 		 * Validate  addresses from the ASIC with the
2031 		 * expected sbuf address
2032 		 */
2033 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2034 		    != ib_mac_rsp->data_addr) {
2035 			/* Small buffer address mismatch */
2036 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2037 			    " in wrong small buffer",
2038 			    __func__, qlge->instance, rx_ring->cq_id);
2039 			goto fatal_error;
2040 		}
2041 		/* get this packet */
2042 		mp2 = sbq_desc->mp;
2043 		(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2044 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2045 		if ((err_flag != 0) || (mp2 == NULL)) {
2046 #ifdef QLGE_LOAD_UNLOAD
2047 			/* failed on this packet, put it back for re-arming */
2048 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2049 #endif
2050 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2051 			mp2 = NULL;
2052 		} else if (rx_copy) {
2053 			if (tp != NULL) {
2054 				bcopy(sbq_desc->bd_dma.vaddr,
2055 				    tp->b_rptr + header_len, payload_len);
2056 				tp->b_wptr =
2057 				    tp->b_rptr + header_len + payload_len;
2058 			}
2059 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2060 			mp2 = NULL;
2061 		} else {
2062 			/* Adjust the buffer length to match the payload_len */
2063 			mp2->b_wptr = mp2->b_rptr + payload_len;
2064 			mp2->b_next = mp2->b_cont = NULL;
2065 			/* Flush DMA'd data */
2066 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2067 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2068 			/*
2069 			 * if payload is too small , copy to
2070 			 * the end of packet header
2071 			 */
2072 			if ((mp1 != NULL) &&
2073 			    (payload_len <= qlge->payload_copy_thresh) &&
2074 			    (pkt_len <
2075 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2076 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2077 				mp1->b_wptr += payload_len;
2078 				freemsg(mp2);
2079 				mp2 = NULL;
2080 			}
2081 		}
2082 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2083 		/*
2084 		 * The data is in a single large buffer.
2085 		 */
		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);

		ASSERT(lbq_desc != NULL);

		QL_PRINT(DBG_RX,
		    ("%d bytes in a single large buffer, lbq_desc = %p, "
		    "lbq_desc->bd_dma.dma_addr = %x,"
		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
		    ib_mac_rsp->data_addr, lbq_desc->mp));
2096 
2097 		/*
2098 		 * Validate  addresses from the ASIC with
2099 		 * the expected lbuf address
2100 		 */
2101 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2102 		    != ib_mac_rsp->data_addr) {
2103 			/* Large buffer address mismatch */
2104 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2105 			    " in wrong large buffer",
2106 			    __func__, qlge->instance, rx_ring->cq_id);
2107 			goto fatal_error;
2108 		}
2109 		mp2 = lbq_desc->mp;
2110 		/* Flush DMA'd data */
2111 		(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2112 		    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2113 		if ((err_flag != 0) || (mp2 == NULL)) {
2114 #ifdef QLGE_LOAD_UNLOAD
2115 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2116 #endif
2117 			/* failed on this packet, put it back for re-arming */
2118 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2119 			mp2 = NULL;
2120 		} else if (rx_copy) {
2121 			if (tp != NULL) {
2122 				bcopy(lbq_desc->bd_dma.vaddr,
2123 				    tp->b_rptr + header_len, payload_len);
2124 				tp->b_wptr =
2125 				    tp->b_rptr + header_len + payload_len;
2126 			}
2127 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2128 			mp2 = NULL;
2129 		} else {
2130 			/*
2131 			 * Adjust the buffer length to match
2132 			 * the packet payload_len
2133 			 */
2134 			mp2->b_wptr = mp2->b_rptr + payload_len;
2135 			mp2->b_next = mp2->b_cont = NULL;
2136 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2137 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2138 			/*
2139 			 * if payload is too small , copy to
2140 			 * the end of packet header
2141 			 */
2142 			if ((mp1 != NULL) &&
2143 			    (payload_len <= qlge->payload_copy_thresh) &&
2144 			    (pkt_len<
2145 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2146 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2147 				mp1->b_wptr += payload_len;
2148 				freemsg(mp2);
2149 				mp2 = NULL;
2150 			}
2151 		}
	} else if (payload_len) { /* IAL case */
		/*
		 * The payload is available but not in a small or large
		 * buffer, so it is described by an IAL (indirect
		 * address list)
		 */
2157 #ifdef QLGE_LOAD_UNLOAD
2158 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2159 #endif
		/* the large buffer addresses are saved in one small buffer */
2161 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2162 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2163 		done = 0;
2164 		cp_offset = 0;
2165 
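		/*
		 * Each 64-bit IAL entry holds the DMA address of one large
		 * buffer; bit 0 is set on the entry of the last fragment.
		 */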
2166 		while (!done) {
2167 			ial_data_addr_low =
2168 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2169 			    0xFFFFFFFE);
2170 			/* check if this is the last packet fragment */
2171 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2172 			curr_ial_ptr++;
2173 			/*
2174 			 * The data is in one or several large buffer(s).
2175 			 */
2176 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2177 			actual_data_addr_low =
2178 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2179 			    0xFFFFFFFE);
2180 			if (ial_data_addr_low != actual_data_addr_low) {
2181 				cmn_err(CE_WARN,
2182 				    "packet saved in wrong ial lrg buffer"
2183 				    " expected %x, actual %lx",
2184 				    ial_data_addr_low,
2185 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2186 				goto fatal_error;
2187 			}
2188 
			size = (payload_len < rx_ring->lbq_buf_size) ?
			    payload_len : rx_ring->lbq_buf_size;
2191 			payload_len -= size;
2192 			mp2 = lbq_desc->mp;
2193 			if ((err_flag != 0) || (mp2 == NULL)) {
2194 #ifdef QLGE_LOAD_UNLOAD
2195 				cmn_err(CE_WARN,
2196 				    "ignore bad data from large buffer");
2197 #endif
2198 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2199 				mp2 = NULL;
2200 			} else if (rx_copy) {
2201 				if (tp != NULL) {
2202 					(void) ddi_dma_sync(
2203 					    lbq_desc->bd_dma.dma_handle,
2204 					    0, size, DDI_DMA_SYNC_FORKERNEL);
2205 					bcopy(lbq_desc->bd_dma.vaddr,
2206 					    tp->b_rptr + header_len + cp_offset,
2207 					    size);
2208 					tp->b_wptr =
2209 					    tp->b_rptr + size + cp_offset +
2210 					    header_len;
2211 					cp_offset += size;
2212 				}
2213 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2214 				mp2 = NULL;
2215 			} else {
2216 				if (mp_ial == NULL) {
2217 					mp_ial = mp2;
2218 				} else {
2219 					linkb(mp_ial, mp2);
2220 				}
2221 
2222 				mp2->b_next = NULL;
2223 				mp2->b_cont = NULL;
2224 				mp2->b_wptr = mp2->b_rptr + size;
2225 				/* Flush DMA'd data */
2226 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2227 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2228 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2229 				    size));
2230 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2231 				    (uint8_t *)mp2->b_rptr, 8, size);
2232 			}
2233 		}
2234 		if (err_flag != 0) {
2235 #ifdef QLGE_LOAD_UNLOAD
2236 			/* failed on this packet, put it back for re-arming */
2237 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2238 #endif
2239 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2240 		} else {
2241 			mp2 = mp_ial;
2242 			freemsg(sbq_desc->mp);
2243 		}
2244 	}
2245 	/*
2246 	 * some packets' hdr not split, then send mp2 upstream, otherwise,
2247 	 * concatenate message block mp2 to the tail of message header, mp1
2248 	 */
2249 	if (!err_flag) {
2250 		if (rx_copy) {
2251 			if (tp != NULL) {
2252 				tp->b_next = NULL;
2253 				tp->b_cont = NULL;
2254 				tp->b_wptr = tp->b_rptr +
2255 				    header_len + payload_len;
2256 			}
2257 			mp = tp;
2258 		} else {
2259 			if (mp1) {
2260 				if (mp2) {
2261 					QL_PRINT(DBG_RX,
2262 					    ("packet in mp1 and mp2\n"));
2263 					/* mp1->b_cont = mp2; */
2264 					linkb(mp1, mp2);
2265 					mp = mp1;
2266 				} else {
2267 					QL_PRINT(DBG_RX,
2268 					    ("packet in mp1 only\n"));
2269 					mp = mp1;
2270 				}
2271 			} else if (mp2) {
2272 				QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2273 				mp = mp2;
2274 			}
2275 		}
2276 	}
2277 	return (mp);
2278 
2279 fatal_error:
2280 	/* fatal Error! */
2281 	if (qlge->fm_enable) {
2282 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2283 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2284 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2285 	}
2286 	if (tp) {
2287 		freemsg(tp);
2288 	}
2289 
2290 	/* *mp->b_wptr = 0; */
2291 	ql_wake_asic_reset_soft_intr(qlge);
2292 	return (NULL);
2293 
2294 }
2295 
2296 /*
2297  * Bump completion queue consumer index.
2298  */
2299 static void
2300 ql_update_cq(struct rx_ring *rx_ring)
2301 {
2302 	rx_ring->cnsmr_idx++;
2303 	rx_ring->curr_entry++;
2304 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2305 		rx_ring->cnsmr_idx = 0;
2306 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2307 	}
2308 }
2309 
2310 /*
2311  * Update completion queue consumer index.
2312  */
2313 static void
2314 ql_write_cq_idx(struct rx_ring *rx_ring)
2315 {
2316 	qlge_t *qlge = rx_ring->qlge;
2317 
2318 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2319 	    rx_ring->cnsmr_idx);
2320 }
2321 
2322 /*
 * Process a SYS-Chip Event Notification Completion Event.
 * The incoming notification describes a link up/down transition
 * or an error condition.
2326  */
2327 static void
2328 ql_process_chip_ae_intr(qlge_t *qlge,
2329     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2330 {
2331 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2332 	uint32_t soft_req = 0;
2333 
2334 	switch (eventType) {
2335 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2336 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2337 			break;
2338 
2339 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2340 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2341 			break;
2342 
		case SYS_EVENT_MULTIPLE_CAM_HITS:	/* 0x6h */
			cmn_err(CE_WARN, "A multiple CAM hits lookup error "
2345 			    "occurred");
2346 			soft_req |= NEED_HW_RESET;
2347 			break;
2348 
2349 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2350 			cmn_err(CE_WARN, "Soft ECC error detected");
2351 			soft_req |= NEED_HW_RESET;
2352 			break;
2353 
2354 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2355 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2356 			    " error occured");
2357 			soft_req |= NEED_MPI_RESET;
2358 			break;
2359 
2360 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2361 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2362 			break;
2363 
2364 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2365 			cmn_err(CE_WARN, "PCI Error reading small/large "
2366 			    "buffers occured");
2367 			soft_req |= NEED_HW_RESET;
2368 			break;
2369 
2370 		default:
2371 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2372 			    "type 0x%x occured",
2373 			    __func__, qlge->instance, eventType));
2374 			break;
2375 	}
2376 
2377 	if ((soft_req & NEED_MPI_RESET) != 0) {
2378 		ql_wake_mpi_reset_soft_intr(qlge);
2379 		if (qlge->fm_enable) {
2380 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2381 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2382 		}
2383 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2384 		ql_wake_asic_reset_soft_intr(qlge);
2385 		if (qlge->fm_enable) {
2386 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2387 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2388 		}
2389 	}
2390 }
2391 
2392 /*
2393  * set received packet checksum flag
2394  */
2395 void
2396 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2397 {
2398 	uint32_t flags;
2399 
	/* Not a TCP or UDP packet? Nothing more to do */
	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
		return;
2404 
2405 	/* No CKO support for IPv6 */
2406 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2407 		return;
2408 
2409 	/*
2410 	 * If checksum error, don't set flags; stack will calculate
2411 	 * checksum, detect the error and update statistics
2412 	 */
2413 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2414 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2415 		return;
2416 
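	/*
	 * HCK_FULLCKSUM_OK tells the stack that the hardware verified the
	 * full checksum, so no software verification is needed.
	 */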
2417 	/* TCP or UDP packet and checksum valid */
2418 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2419 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2420 		flags = HCK_FULLCKSUM_OK;
2421 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2422 	}
2423 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2424 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2425 		flags = HCK_FULLCKSUM_OK;
2426 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2427 	}
2428 }
2429 
2430 /*
2431  * This function goes through h/w descriptor in one specified rx ring,
2432  * receives the data if the descriptor status shows the data is ready.
2433  * It returns a chain of mblks containing the received data, to be
2434  * passed up to mac_rx_ring().
2435  */
2436 mblk_t *
2437 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2438 {
2439 	qlge_t *qlge = rx_ring->qlge;
2440 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2441 	struct ib_mac_iocb_rsp *net_rsp;
2442 	mblk_t *mp;
2443 	mblk_t *mblk_head;
2444 	mblk_t **mblk_tail;
2445 	uint32_t received_bytes = 0;
2446 	uint32_t length;
2447 #ifdef QLGE_PERFORMANCE
2448 	uint32_t pkt_ct = 0;
2449 #endif
2450 
2451 #ifdef QLGE_TRACK_BUFFER_USAGE
2452 	uint32_t consumer_idx;
2453 	uint32_t producer_idx;
2454 	uint32_t num_free_entries;
2455 	uint32_t temp;
2456 
2457 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2458 	consumer_idx = temp & 0x0000ffff;
2459 	producer_idx = (temp >> 16);
2460 
2461 	if (consumer_idx > producer_idx)
2462 		num_free_entries = (consumer_idx - producer_idx);
2463 	else
2464 		num_free_entries = NUM_RX_RING_ENTRIES - (
2465 		    producer_idx - consumer_idx);
2466 
2467 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2468 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2469 
2470 #endif
2471 	mblk_head = NULL;
2472 	mblk_tail = &mblk_head;
2473 
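	/*
	 * Process completion entries until the consumer index catches up
	 * with the producer index reported by the hardware.
	 */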
2474 	while ((prod != rx_ring->cnsmr_idx)) {
2475 		QL_PRINT(DBG_RX,
2476 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2477 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2478 
2479 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2480 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2481 		    (off_t)((uintptr_t)net_rsp -
2482 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2483 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2484 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2485 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2486 
2487 		switch (net_rsp->opcode) {
2488 
2489 		case OPCODE_IB_MAC_IOCB:
2490 			/* Adding length of pkt header and payload */
2491 			length = le32_to_cpu(net_rsp->data_len) +
2492 			    le32_to_cpu(net_rsp->hdr_len);
			if ((poll_bytes != QLGE_POLL_ALL) &&
			    ((received_bytes + length) > poll_bytes)) {
				/*
				 * The polling byte budget is reached; leave
				 * this entry unconsumed for the next poll
				 */
				goto poll_exit;
			}
2497 			received_bytes += length;
2498 
2499 #ifdef QLGE_PERFORMANCE
2500 			pkt_ct++;
2501 #endif
2502 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2503 			if (mp != NULL) {
2504 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2505 					/*
2506 					 * Increment number of packets we have
2507 					 * indicated to the stack, should be
2508 					 * decremented when we get it back
2509 					 * or when freemsg is called
2510 					 */
2511 					ASSERT(rx_ring->rx_indicate
2512 					    <= rx_ring->cq_len);
2513 #ifdef QLGE_LOAD_UNLOAD
2514 					cmn_err(CE_WARN, "%s do not send to OS,"
2515 					    " mac_flags %d, indicate %d",
2516 					    __func__, rx_ring->mac_flags,
2517 					    rx_ring->rx_indicate);
2518 #endif
2519 					QL_PRINT(DBG_RX,
2520 					    ("cq_id = %d, packet "
2521 					    "dropped, mac not "
2522 					    "enabled.\n",
2523 					    rx_ring->cq_id));
2524 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2525 
2526 					/* rx_lock is expected to be held */
2527 					mutex_exit(&rx_ring->rx_lock);
2528 					freemsg(mp);
2529 					mutex_enter(&rx_ring->rx_lock);
2530 					mp = NULL;
2531 				}
2532 
2533 				if (mp != NULL) {
2534 					/*
2535 					 * IP full packet has been
2536 					 * successfully verified by
2537 					 * H/W and is correct
2538 					 */
2539 					ql_set_rx_cksum(mp, net_rsp);
2540 
2541 					rx_ring->rx_packets++;
2542 					rx_ring->rx_bytes += length;
2543 					*mblk_tail = mp;
2544 					mblk_tail = &mp->b_next;
2545 				}
2546 			} else {
2547 				QL_PRINT(DBG_RX,
2548 				    ("cq_id = %d, packet dropped\n",
2549 				    rx_ring->cq_id));
2550 				rx_ring->rx_packets_dropped_no_buffer++;
2551 			}
2552 			break;
2553 
2554 		case OPCODE_IB_SYS_EVENT_IOCB:
2555 			ql_process_chip_ae_intr(qlge,
2556 			    (struct ib_sys_event_iocb_rsp *)
2557 			    net_rsp);
2558 			break;
2559 
2560 		default:
2561 			cmn_err(CE_WARN,
2562 			    "%s Ring(%d)Hit default case, not handled!"
2563 			    " dropping the packet, "
2564 			    "opcode = %x.", __func__, rx_ring->cq_id,
2565 			    net_rsp->opcode);
2566 			break;
2567 		}
2568 		/* increment cnsmr_idx and curr_entry */
2569 		ql_update_cq(rx_ring);
2570 		prod = ql_read_sh_reg(qlge, rx_ring);
2571 
2572 	}
2573 
2574 #ifdef QLGE_PERFORMANCE
2575 	if (pkt_ct >= 7)
2576 		rx_ring->hist[7]++;
2577 	else if (pkt_ct == 6)
2578 		rx_ring->hist[6]++;
2579 	else if (pkt_ct == 5)
2580 		rx_ring->hist[5]++;
2581 	else if (pkt_ct == 4)
2582 		rx_ring->hist[4]++;
2583 	else if (pkt_ct == 3)
2584 		rx_ring->hist[3]++;
2585 	else if (pkt_ct == 2)
2586 		rx_ring->hist[2]++;
2587 	else if (pkt_ct == 1)
2588 		rx_ring->hist[1]++;
2589 	else if (pkt_ct == 0)
2590 		rx_ring->hist[0]++;
2591 #endif
2592 
2593 	/* update cnsmr_idx */
2594 	ql_write_cq_idx(rx_ring);
2595 	/* do not enable interrupt for polling mode */
2596 	if (poll_bytes == QLGE_POLL_ALL)
2597 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2598 	return (mblk_head);
2599 }
2600 
2601 /* Process an outbound completion from an rx ring. */
2602 static void
2603 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2604 {
2605 	struct tx_ring *tx_ring;
2606 	struct tx_ring_desc *tx_ring_desc;
2607 	int j;
2608 
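	/*
	 * The completion's txq_idx and tid identify the tx ring and the
	 * wq descriptor that produced it.
	 */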
2609 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2610 	tx_ring_desc = tx_ring->wq_desc;
2611 	tx_ring_desc += mac_rsp->tid;
2612 
2613 	if (tx_ring_desc->tx_type == USE_DMA) {
2614 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2615 		    __func__, qlge->instance));
2616 
2617 		/*
2618 		 * Release the DMA resource that is used for
2619 		 * DMA binding.
2620 		 */
2621 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2622 			(void) ddi_dma_unbind_handle(
2623 			    tx_ring_desc->tx_dma_handle[j]);
2624 		}
2625 
2626 		tx_ring_desc->tx_dma_handle_used = 0;
2627 		/*
2628 		 * Free the mblk after sending completed
2629 		 */
2630 		if (tx_ring_desc->mp != NULL) {
2631 			freemsg(tx_ring_desc->mp);
2632 			tx_ring_desc->mp = NULL;
2633 		}
2634 	}
2635 
2636 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2637 	tx_ring->opackets++;
2638 
2639 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2640 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2641 		tx_ring->errxmt++;
2642 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2643 			/* EMPTY */
2644 			QL_PRINT(DBG_TX,
2645 			    ("Total descriptor length did not match "
2646 			    "transfer length.\n"));
2647 		}
2648 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2649 			/* EMPTY */
2650 			QL_PRINT(DBG_TX,
2651 			    ("Frame too short to be legal, not sent.\n"));
2652 		}
2653 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2654 			/* EMPTY */
2655 			QL_PRINT(DBG_TX,
2656 			    ("Frame too long, but sent anyway.\n"));
2657 		}
2658 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2659 			/* EMPTY */
2660 			QL_PRINT(DBG_TX,
2661 			    ("PCI backplane error. Frame not sent.\n"));
2662 		}
2663 	}
2664 	atomic_inc_32(&tx_ring->tx_free_count);
2665 }
2666 
2667 /*
2668  * clean up tx completion iocbs
2669  */
2670 int
2671 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2672 {
2673 	qlge_t *qlge = rx_ring->qlge;
2674 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2675 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2676 	int count = 0;
2677 	struct tx_ring *tx_ring;
2678 	boolean_t resume_tx = B_FALSE;
2679 
2680 	mutex_enter(&rx_ring->rx_lock);
2681 #ifdef QLGE_TRACK_BUFFER_USAGE
2682 	{
2683 	uint32_t consumer_idx;
2684 	uint32_t producer_idx;
2685 	uint32_t num_free_entries;
2686 	uint32_t temp;
2687 
2688 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2689 	consumer_idx = temp & 0x0000ffff;
2690 	producer_idx = (temp >> 16);
2691 
2692 	if (consumer_idx > producer_idx)
2693 		num_free_entries = (consumer_idx - producer_idx);
2694 	else
2695 		num_free_entries = NUM_RX_RING_ENTRIES -
2696 		    (producer_idx - consumer_idx);
2697 
2698 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2699 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2700 
2701 	}
2702 #endif
2703 	/* While there are entries in the completion queue. */
2704 	while (prod != rx_ring->cnsmr_idx) {
2705 
2706 		QL_PRINT(DBG_RX,
2707 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2708 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2709 
2710 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2711 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2712 		    (off_t)((uintptr_t)net_rsp -
2713 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2714 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2715 
2716 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2717 		    "response packet data\n",
2718 		    rx_ring->curr_entry, 8,
2719 		    (size_t)sizeof (*net_rsp));
2720 
2721 		switch (net_rsp->opcode) {
2722 
2723 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2724 		case OPCODE_OB_MAC_IOCB:
2725 			ql_process_mac_tx_intr(qlge, net_rsp);
2726 			break;
2727 
2728 		default:
2729 			cmn_err(CE_WARN,
2730 			    "%s Hit default case, not handled! "
2731 			    "dropping the packet,"
2732 			    " opcode = %x.",
2733 			    __func__, net_rsp->opcode);
2734 			break;
2735 		}
2736 		count++;
2737 		ql_update_cq(rx_ring);
2738 		prod = ql_read_sh_reg(qlge, rx_ring);
2739 	}
2740 	ql_write_cq_idx(rx_ring);
2741 
2742 	mutex_exit(&rx_ring->rx_lock);
2743 
2744 	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2745 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2746 
2747 	mutex_enter(&tx_ring->tx_lock);
2748 
2749 	if (tx_ring->queue_stopped &&
2750 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2751 		/*
2752 		 * The queue got stopped because the tx_ring was full.
2753 		 * Wake it up, because it's now at least 25% empty.
2754 		 */
2755 		tx_ring->queue_stopped = 0;
2756 		resume_tx = B_TRUE;
2757 	}
2758 
2759 	mutex_exit(&tx_ring->tx_lock);
2760 	/* Don't hold the lock during OS callback */
2761 	if (resume_tx)
2762 		RESUME_TX(tx_ring);
2763 	return (count);
2764 }
2765 
2766 /*
2767  * reset asic when error happens
2768  */
2769 /* ARGSUSED */
2770 static uint_t
2771 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2772 {
2773 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2774 	int status;
2775 
2776 	mutex_enter(&qlge->gen_mutex);
2777 	(void) ql_do_stop(qlge);
2778 	/*
2779 	 * Write default ethernet address to chip register Mac
2780 	 * Address slot 0 and Enable Primary Mac Function.
2781 	 */
2782 	mutex_enter(&qlge->hw_mutex);
2783 	(void) ql_unicst_set(qlge,
2784 	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2785 	mutex_exit(&qlge->hw_mutex);
2786 	qlge->mac_flags = QL_MAC_INIT;
2787 	status = ql_do_start(qlge);
2788 	if (status != DDI_SUCCESS)
2789 		goto error;
2790 	qlge->mac_flags = QL_MAC_STARTED;
2791 	mutex_exit(&qlge->gen_mutex);
2792 	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2793 
2794 	return (DDI_INTR_CLAIMED);
2795 
2796 error:
2797 	mutex_exit(&qlge->gen_mutex);
2798 	cmn_err(CE_WARN,
2799 	    "qlge up/down cycle failed, closing device");
2800 	if (qlge->fm_enable) {
2801 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2802 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2803 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2804 	}
2805 	return (DDI_INTR_CLAIMED);
2806 }
2807 
2808 /*
2809  * Reset MPI
2810  */
2811 /* ARGSUSED */
2812 static uint_t
2813 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2814 {
2815 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2816 
2817 	(void) ql_reset_mpi_risc(qlge);
2818 	return (DDI_INTR_CLAIMED);
2819 }
2820 
2821 /*
2822  * Process MPI mailbox messages
2823  */
2824 /* ARGSUSED */
2825 static uint_t
2826 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2827 {
2828 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2829 
2830 	ql_do_mpi_intr(qlge);
2831 	return (DDI_INTR_CLAIMED);
2832 }
2833 
/* Fire up a handler to reset the ASIC. */
2835 void
2836 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2837 {
2838 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2839 }
2840 
2841 static void
2842 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2843 {
2844 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2845 }
2846 
2847 static void
2848 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2849 {
2850 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2851 }
2852 
2853 /*
2854  * This handles a fatal error, MPI activity, and the default
2855  * rx_ring in an MSI-X multiple interrupt vector environment.
 * In an MSI/Legacy environment it also processes the rest of
2857  * the rx_rings.
2858  */
2859 /* ARGSUSED */
2860 static uint_t
2861 ql_isr(caddr_t arg1, caddr_t arg2)
2862 {
2863 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2864 	struct rx_ring *ob_ring;
2865 	qlge_t *qlge = rx_ring->qlge;
2866 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2867 	uint32_t var, prod;
2868 	int i;
2869 	int work_done = 0;
2870 
2871 	mblk_t *mp;
2872 
2873 	_NOTE(ARGUNUSED(arg2));
2874 
2875 	++qlge->rx_interrupts[rx_ring->cq_id];
2876 
2877 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2878 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2879 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2880 		var = ql_read_reg(qlge, REG_STATUS);
2881 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2882 		return (DDI_INTR_CLAIMED);
2883 	}
2884 
2885 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2886 
2887 	/*
2888 	 * process send completes on first stride tx ring if available
2889 	 */
2890 	if (qlge->isr_stride) {
2891 		ob_ring = &qlge->rx_ring[qlge->isr_stride];
2892 		if (ql_read_sh_reg(qlge, ob_ring) !=
2893 		    ob_ring->cnsmr_idx) {
2894 			(void) ql_clean_outbound_rx_ring(ob_ring);
2895 		}
2896 	}
2897 	/*
2898 	 * Check the default queue and wake handler if active.
2899 	 */
2900 	rx_ring = &qlge->rx_ring[0];
2901 	prod = ql_read_sh_reg(qlge, rx_ring);
2902 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2903 	    prod, rx_ring->cnsmr_idx));
2904 	/* check if interrupt is due to incoming packet */
2905 	if (prod != rx_ring->cnsmr_idx) {
2906 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2907 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2908 		mutex_enter(&rx_ring->rx_lock);
2909 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2910 		mutex_exit(&rx_ring->rx_lock);
2911 
2912 		if (mp != NULL)
2913 			RX_UPSTREAM(rx_ring, mp);
2914 		work_done++;
2915 	} else {
2916 		/*
2917 		 * If interrupt is not due to incoming packet, read status
2918 		 * register to see if error happens or mailbox interrupt.
2919 		 */
2920 		var = ql_read_reg(qlge, REG_STATUS);
2921 		if ((var & STATUS_FE) != 0) {
2922 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2923 			if (qlge->fm_enable) {
2924 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2925 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2926 				ddi_fm_service_impact(qlge->dip,
2927 				    DDI_SERVICE_LOST);
2928 			}
2929 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2930 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2931 			cmn_err(CE_WARN,
2932 			    "Resetting chip. Error Status Register = 0x%x",
2933 			    var);
2934 			ql_wake_asic_reset_soft_intr(qlge);
2935 			return (DDI_INTR_CLAIMED);
2936 		}
2937 
2938 		/*
2939 		 * Check MPI processor activity.
2940 		 */
2941 		if ((var & STATUS_PI) != 0) {
2942 			/*
2943 			 * We've got an async event or mailbox completion.
2944 			 * Handle it and clear the source of the interrupt.
2945 			 */
2946 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2947 
2948 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2949 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2950 			ql_wake_mpi_event_soft_intr(qlge);
2951 			work_done++;
2952 		}
2953 	}
2954 
2955 
2956 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2957 		/*
2958 		 * Start the DPC for each active queue.
2959 		 */
2960 		for (i = 1; i < qlge->rx_ring_count; i++) {
2961 			rx_ring = &qlge->rx_ring[i];
2962 
2963 			if (ql_read_sh_reg(qlge, rx_ring) !=
2964 			    rx_ring->cnsmr_idx) {
2965 				QL_PRINT(DBG_INTR,
2966 				    ("Waking handler for rx_ring[%d].\n", i));
2967 
2968 				ql_disable_completion_interrupt(qlge,
2969 				    rx_ring->irq);
2970 				if (rx_ring->type == TX_Q) {
2971 					(void) ql_clean_outbound_rx_ring(
2972 					    rx_ring);
2973 					ql_enable_completion_interrupt(
2974 					    rx_ring->qlge, rx_ring->irq);
2975 				} else {
2976 					mutex_enter(&rx_ring->rx_lock);
2977 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2978 					mutex_exit(&rx_ring->rx_lock);
2979 					if (mp != NULL)
2980 						RX_UPSTREAM(rx_ring, mp);
2981 #ifdef QLGE_LOAD_UNLOAD
2982 					if (rx_ring->mac_flags ==
2983 					    QL_MAC_STOPPED)
2984 						cmn_err(CE_NOTE,
2985 						    "%s rx_indicate(%d) %d\n",
2986 						    __func__, i,
2987 						    rx_ring->rx_indicate);
2988 #endif
2989 				}
2990 				work_done++;
2991 			}
2992 		}
2993 	}
2994 
2995 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2996 
2997 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2998 }
2999 
3000 /*
3001  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
3002  */
3003 /* ARGSUSED */
3004 static uint_t
3005 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
3006 {
3007 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3008 	qlge_t *qlge = rx_ring->qlge;
3009 	_NOTE(ARGUNUSED(arg2));
3010 
3011 	++qlge->rx_interrupts[rx_ring->cq_id];
3012 	(void) ql_clean_outbound_rx_ring(rx_ring);
3013 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
3014 
3015 	return (DDI_INTR_CLAIMED);
3016 }
3017 
3018 /*
3019  * MSI-X Multiple Vector Interrupt Handler
3020  */
3021 /* ARGSUSED */
3022 static uint_t
3023 ql_msix_isr(caddr_t arg1, caddr_t arg2)
3024 {
3025 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3026 	struct rx_ring *ob_ring;
3027 	qlge_t *qlge = rx_ring->qlge;
3028 	mblk_t *mp;
3029 	_NOTE(ARGUNUSED(arg2));
3030 
3031 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3032 
3033 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
3034 
3035 	/*
3036 	 * process send completes on stride tx ring if available
3037 	 */
3038 	if (qlge->isr_stride) {
3039 		ob_ring = rx_ring + qlge->isr_stride;
3040 		if (ql_read_sh_reg(qlge, ob_ring) !=
3041 		    ob_ring->cnsmr_idx) {
3042 			++qlge->rx_interrupts[ob_ring->cq_id];
3043 			(void) ql_clean_outbound_rx_ring(ob_ring);
3044 		}
3045 	}
3046 
3047 	++qlge->rx_interrupts[rx_ring->cq_id];
3048 
3049 	mutex_enter(&rx_ring->rx_lock);
3050 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3051 	mutex_exit(&rx_ring->rx_lock);
3052 
3053 	if (mp != NULL)
3054 		RX_UPSTREAM(rx_ring, mp);
3055 
3056 	return (DDI_INTR_CLAIMED);
3057 }
3058 
3059 /*
3060  * Poll n_bytes of chained incoming packets
3061  */
3062 mblk_t *
3063 ql_ring_rx_poll(void *arg, int n_bytes)
3064 {
3065 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
3066 	qlge_t *qlge = rx_ring->qlge;
3067 	mblk_t *mp = NULL;
3068 	uint32_t var;
3069 
3070 	ASSERT(n_bytes >= 0);
3071 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
3072 	    __func__, rx_ring->cq_id, n_bytes));
3073 
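	/*
	 * In polling mode the ring's completion interrupt stays disabled
	 * and ql_ring_rx() stops once the n_bytes budget is reached.
	 */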
3074 	++qlge->rx_polls[rx_ring->cq_id];
3075 
3076 	if (n_bytes == 0)
3077 		return (mp);
3078 	mutex_enter(&rx_ring->rx_lock);
3079 	mp = ql_ring_rx(rx_ring, n_bytes);
3080 	mutex_exit(&rx_ring->rx_lock);
3081 
3082 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3083 		var = ql_read_reg(qlge, REG_STATUS);
3084 		/*
3085 		 * Check for fatal error.
3086 		 */
3087 		if ((var & STATUS_FE) != 0) {
3088 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3089 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
3090 			cmn_err(CE_WARN, "Got fatal error %x.", var);
3091 			ql_wake_asic_reset_soft_intr(qlge);
3092 			if (qlge->fm_enable) {
3093 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3094 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3095 				ddi_fm_service_impact(qlge->dip,
3096 				    DDI_SERVICE_LOST);
3097 			}
3098 		}
3099 		/*
3100 		 * Check MPI processor activity.
3101 		 */
3102 		if ((var & STATUS_PI) != 0) {
3103 			/*
3104 			 * We've got an async event or mailbox completion.
3105 			 * Handle it and clear the source of the interrupt.
3106 			 */
3107 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3108 			ql_do_mpi_intr(qlge);
3109 		}
3110 	}
3111 
3112 	return (mp);
3113 }
3114 
3115 /*
3116  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3117  */
3118 /* ARGSUSED */
3119 static uint_t
3120 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3121 {
3122 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3123 	qlge_t *qlge = rx_ring->qlge;
3124 	mblk_t *mp;
3125 	_NOTE(ARGUNUSED(arg2));
3126 
3127 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3128 
3129 	++qlge->rx_interrupts[rx_ring->cq_id];
3130 
3131 	mutex_enter(&rx_ring->rx_lock);
3132 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3133 	mutex_exit(&rx_ring->rx_lock);
3134 
3135 	if (mp != NULL)
3136 		RX_UPSTREAM(rx_ring, mp);
3137 
3138 	return (DDI_INTR_CLAIMED);
3139 }
3140 
3141 
3142 /*
3143  *
3144  * Allocate DMA Buffer for ioctl service
3145  *
3146  */
3147 static int
3148 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3149 {
3150 	uint64_t phy_addr;
3151 	uint64_t alloc_size;
3152 	ddi_dma_cookie_t dma_cookie;
3153 
3154 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3155 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3156 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3157 	    &ql_buf_acc_attr,
3158 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3159 	    &qlge->ioctl_buf_dma_attr.acc_handle,
3160 	    (size_t)alloc_size,  /* mem size */
3161 	    (size_t)0,  /* alignment */
3162 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3163 	    &dma_cookie) != 0) {
3164 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3165 		    __func__, qlge->instance);
3166 		return (DDI_FAILURE);
3167 	}
3168 
3169 	phy_addr = dma_cookie.dmac_laddress;
3170 
3171 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3172 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3173 		return (DDI_FAILURE);
3174 	}
3175 
3176 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3177 
3178 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3179 	    "phy_addr = 0x%lx\n",
3180 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3181 
3182 	return (DDI_SUCCESS);
3183 }
3184 
3185 
3186 /*
3187  * Function to free physical memory.
3188  */
3189 static void
3190 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3191 {
3192 	if (dma_handle != NULL) {
3193 		(void) ddi_dma_unbind_handle(*dma_handle);
3194 		if (acc_handle != NULL)
3195 			ddi_dma_mem_free(acc_handle);
3196 		ddi_dma_free_handle(dma_handle);
3197 	}
3198 }
3199 
3200 /*
3201  * Function to free ioctl dma buffer.
3202  */
3203 static void
3204 ql_free_ioctl_dma_buf(qlge_t *qlge)
3205 {
3206 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3207 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3208 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3209 
3210 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3211 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3212 	}
3213 }
3214 
3215 /*
3216  * Free shadow register space used for request and completion queues
3217  */
3218 static void
3219 ql_free_shadow_space(qlge_t *qlge)
3220 {
3221 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3222 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3223 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
3224 		bzero(&qlge->host_copy_shadow_dma_attr,
3225 		    sizeof (qlge->host_copy_shadow_dma_attr));
3226 	}
3227 
3228 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3229 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3230 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3231 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3232 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3233 	}
3234 }
3235 
3236 /*
3237  * Allocate shadow register space for request and completion queues
3238  */
3239 static int
3240 ql_alloc_shadow_space(qlge_t *qlge)
3241 {
3242 	ddi_dma_cookie_t dma_cookie;
3243 
3244 	if (ql_alloc_phys(qlge->dip,
3245 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3246 	    &ql_dev_acc_attr,
3247 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3248 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3249 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3250 	    (size_t)4, /* 4 bytes alignment */
3251 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3252 	    &dma_cookie) != 0) {
3253 		bzero(&qlge->host_copy_shadow_dma_attr,
3254 		    sizeof (qlge->host_copy_shadow_dma_attr));
3255 
3256 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3257 		    "response shadow registers", __func__, qlge->instance);
3258 		return (DDI_FAILURE);
3259 	}
3260 
3261 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3262 
3263 	if (ql_alloc_phys(qlge->dip,
3264 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3265 	    &ql_desc_acc_attr,
3266 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3267 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3268 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3269 	    (size_t)4, /* 4 bytes alignment */
3270 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3271 	    &dma_cookie) != 0) {
3272 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3273 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3274 
3275 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3276 		    "for request shadow registers",
3277 		    __func__, qlge->instance);
3278 		goto err_wqp_sh_area;
3279 	}
3280 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3281 
3282 	return (DDI_SUCCESS);
3283 
3284 err_wqp_sh_area:
3285 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3286 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
3287 	bzero(&qlge->host_copy_shadow_dma_attr,
3288 	    sizeof (qlge->host_copy_shadow_dma_attr));
3289 
3290 	return (DDI_FAILURE);
3291 }
3292 
3293 /*
3294  * Initialize a tx ring
3295  */
3296 static void
3297 ql_init_tx_ring(struct tx_ring *tx_ring)
3298 {
3299 	int i;
3300 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3301 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3302 
3303 	for (i = 0; i < tx_ring->wq_len; i++) {
3304 		tx_ring_desc->index = i;
3305 		tx_ring_desc->queue_entry = mac_iocb_ptr;
3306 		mac_iocb_ptr++;
3307 		tx_ring_desc++;
3308 	}
3309 	tx_ring->tx_free_count = tx_ring->wq_len;
3310 	tx_ring->queue_stopped = 0;
3311 }
3312 
3313 /*
3314  * Free one tx ring resources
3315  */
3316 static void
3317 ql_free_tx_resources(struct tx_ring *tx_ring)
3318 {
3319 	struct tx_ring_desc *tx_ring_desc;
3320 	int i, j;
3321 
3322 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3323 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3324 
3325 	if (tx_ring->wq_desc != NULL) {
3326 		tx_ring_desc = tx_ring->wq_desc;
3327 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3328 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3329 				if (tx_ring_desc->tx_dma_handle[j]) {
3330 					/*
3331 					 * The unbinding will happen in tx
3332 					 * completion, here we just free the
3333 					 * handles
3334 					 */
3335 					ddi_dma_free_handle(
3336 					    &(tx_ring_desc->tx_dma_handle[j]));
3337 					tx_ring_desc->tx_dma_handle[j] = NULL;
3338 				}
3339 			}
3340 			if (tx_ring_desc->oal != NULL) {
3341 				tx_ring_desc->oal_dma_addr = 0;
3342 				tx_ring_desc->oal = NULL;
3343 				tx_ring_desc->copy_buffer = NULL;
3344 				tx_ring_desc->copy_buffer_dma_addr = 0;
3345 
3346 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3347 				    &tx_ring_desc->oal_dma.acc_handle);
3348 			}
3349 		}
3350 		kmem_free(tx_ring->wq_desc,
3351 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3352 		tx_ring->wq_desc = NULL;
3353 	}
3354 	/* free the wqicb struct */
3355 	if (tx_ring->wqicb_dma.dma_handle) {
3356 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3357 		    &tx_ring->wqicb_dma.acc_handle);
3358 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3359 	}
3360 }
3361 
3362 /*
3363  * Allocate work (request) queue memory and transmit
3364  * descriptors for this transmit ring
3365  */
3366 static int
3367 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3368 {
3369 	ddi_dma_cookie_t dma_cookie;
3370 	struct tx_ring_desc *tx_ring_desc;
3371 	int i, j;
3372 	uint32_t length;
3373 
3374 	/* allocate dma buffers for obiocbs */
3375 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3376 	    &ql_desc_acc_attr,
3377 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3378 	    &tx_ring->wq_dma.acc_handle,
3379 	    (size_t)tx_ring->wq_size,	/* mem size */
3380 	    (size_t)128, /* alignment:128 bytes boundary */
3381 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3382 	    &dma_cookie) != 0) {
		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3384 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3385 		    __func__, qlge->instance);
3386 		return (DDI_FAILURE);
3387 	}
3388 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3389 
3390 	tx_ring->wq_desc =
3391 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3392 	    KM_NOSLEEP);
3393 	if (tx_ring->wq_desc == NULL) {
3394 		goto err;
3395 	} else {
3396 		tx_ring_desc = tx_ring->wq_desc;
3397 		/*
3398 		 * Allocate a large enough structure to hold the following
3399 		 * 1. oal buffer MAX_SGELEMENTS * sizeof (oal_entry) bytes
3400 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3401 		 */
3402 		length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3403 		    + QL_MAX_COPY_LENGTH;
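		/*
		 * Layout of each per-descriptor buffer:
		 *   oal entries (MAX_SG_ELEMENTS * sizeof (struct oal_entry))
		 *   followed by the copy buffer (QL_MAX_COPY_LENGTH bytes)
		 */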
3404 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3405 
3406 			if (ql_alloc_phys(qlge->dip,
3407 			    &tx_ring_desc->oal_dma.dma_handle,
3408 			    &ql_desc_acc_attr,
3409 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3410 			    &tx_ring_desc->oal_dma.acc_handle,
3411 			    (size_t)length,	/* mem size */
3412 			    (size_t)0, /* default alignment:8 bytes boundary */
3413 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3414 			    &dma_cookie) != 0) {
3415 				bzero(&tx_ring_desc->oal_dma,
3416 				    sizeof (tx_ring_desc->oal_dma));
3417 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3418 				    "oal alloc failed.",
3419 				    __func__, qlge->instance);
3420 				return (DDI_FAILURE);
3421 			}
3422 
3423 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3424 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3425 			tx_ring_desc->copy_buffer =
3426 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3427 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3428 			tx_ring_desc->copy_buffer_dma_addr =
3429 			    (tx_ring_desc->oal_dma_addr
3430 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3431 
3432 			/* Allocate dma handles for transmit buffers */
3433 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3434 				if (ddi_dma_alloc_handle(qlge->dip,
3435 				    &tx_mapping_dma_attr,
3436 				    DDI_DMA_DONTWAIT,
3437 				    0, &tx_ring_desc->tx_dma_handle[j])
3438 				    != DDI_SUCCESS) {
3439 					cmn_err(CE_WARN,
3440 					    "!%s: ddi_dma_alloc_handle: "
3441 					    "tx_dma_handle "
3442 					    "alloc failed", __func__);
3443 					goto err;
3444 				}
3445 			}
3446 		}
3447 	}
3448 	/* alloc a wqicb control block to load this tx ring to hw */
3449 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3450 	    &ql_desc_acc_attr,
3451 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3452 	    &tx_ring->wqicb_dma.acc_handle,
3453 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
	    (size_t)0, /* default alignment */
3455 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3456 	    &dma_cookie) != 0) {
3457 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3458 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3459 		    __func__, qlge->instance);
3460 		return (DDI_FAILURE);
3461 	}
3462 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3463 
3464 	return (DDI_SUCCESS);
3465 
3466 err:
3467 	ql_free_tx_resources(tx_ring);
3468 	return (DDI_FAILURE);
3469 }
3470 
3471 /*
3472  * Free one rx ring resources
3473  */
3474 static void
3475 ql_free_rx_resources(struct rx_ring *rx_ring)
3476 {
3477 	/* Free the small buffer queue. */
3478 	if (rx_ring->sbq_dma.dma_handle) {
3479 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3480 		    &rx_ring->sbq_dma.acc_handle);
3481 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3482 	}
3483 
3484 	/* Free the small buffer queue control blocks. */
3485 	kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3486 	    sizeof (struct bq_desc));
3487 	rx_ring->sbq_desc = NULL;
3488 
3489 	/* Free the large buffer queue. */
3490 	if (rx_ring->lbq_dma.dma_handle) {
3491 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3492 		    &rx_ring->lbq_dma.acc_handle);
3493 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3494 	}
3495 
3496 	/* Free the large buffer queue control blocks. */
3497 	kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3498 	    sizeof (struct bq_desc));
3499 	rx_ring->lbq_desc = NULL;
3500 
3501 	/* Free cqicb struct */
3502 	if (rx_ring->cqicb_dma.dma_handle) {
3503 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3504 		    &rx_ring->cqicb_dma.acc_handle);
3505 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3506 	}
3507 	/* Free the rx queue. */
3508 	if (rx_ring->cq_dma.dma_handle) {
3509 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3510 		    &rx_ring->cq_dma.acc_handle);
3511 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3512 	}
3513 }
3514 
3515 /*
3516  * Allocate queues and buffers for this completions queue based
3517  * on the values in the parameter structure.
3518  */
3519 static int
3520 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3521 {
3522 	ddi_dma_cookie_t dma_cookie;
3523 
3524 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3525 	    &ql_desc_acc_attr,
3526 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3527 	    &rx_ring->cq_dma.acc_handle,
3528 	    (size_t)rx_ring->cq_size,  /* mem size */
3529 	    (size_t)128, /* alignment:128 bytes boundary */
3530 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3531 	    &dma_cookie) != 0)	{
3532 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3533 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3534 		    __func__, qlge->instance);
3535 		return (DDI_FAILURE);
3536 	}
3537 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3538 
3539 	if (rx_ring->sbq_len != 0) {
3540 		/*
3541 		 * Allocate small buffer queue.
3542 		 */
3543 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3544 		    &ql_desc_acc_attr,
3545 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3546 		    &rx_ring->sbq_dma.acc_handle,
3547 		    (size_t)rx_ring->sbq_size,  /* mem size */
3548 		    (size_t)128, /* alignment:128 bytes boundary */
3549 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3550 		    &dma_cookie) != 0) {
3551 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3552 			cmn_err(CE_WARN,
3553 			    "%s(%d): small buffer queue allocation failed.",
3554 			    __func__, qlge->instance);
3555 			goto err_mem;
3556 		}
3557 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3558 
3559 		/*
3560 		 * Allocate small buffer queue control blocks.
3561 		 */
3562 		rx_ring->sbq_desc =
3563 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3564 		    KM_NOSLEEP);
3565 		if (rx_ring->sbq_desc == NULL) {
3566 			cmn_err(CE_WARN,
3567 			    "sbq control block allocation failed.");
3568 			goto err_mem;
3569 		}
3570 
3571 		ql_init_sbq_ring(rx_ring);
3572 	}
3573 
3574 	if (rx_ring->lbq_len != 0) {
3575 		/*
3576 		 * Allocate large buffer queue.
3577 		 */
3578 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3579 		    &ql_desc_acc_attr,
3580 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3581 		    &rx_ring->lbq_dma.acc_handle,
3582 		    (size_t)rx_ring->lbq_size,  /* mem size */
3583 		    (size_t)128, /* alignment:128 bytes boundary */
3584 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3585 		    &dma_cookie) != 0) {
3586 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3587 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3588 			    __func__, qlge->instance);
3589 			goto err_mem;
3590 		}
3591 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3592 
3593 		/*
3594 		 * Allocate large buffer queue control blocks.
3595 		 */
3596 		rx_ring->lbq_desc =
3597 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3598 		    KM_NOSLEEP);
3599 		if (rx_ring->lbq_desc == NULL) {
3600 			cmn_err(CE_WARN,
3601 			    "Large buffer queue control block allocation "
3602 			    "failed.");
3603 			goto err_mem;
3604 		}
3605 		ql_init_lbq_ring(rx_ring);
3606 	}
3607 
3608 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3609 	    &ql_desc_acc_attr,
3610 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3611 	    &rx_ring->cqicb_dma.acc_handle,
3612 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
	    (size_t)0, /* default alignment */
3614 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3615 	    &dma_cookie) != 0) {
3616 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3617 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3618 		    __func__, qlge->instance);
3619 		return (DDI_FAILURE);
3620 	}
3621 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3622 
3623 	return (DDI_SUCCESS);
3624 
3625 err_mem:
3626 	ql_free_rx_resources(rx_ring);
3627 	return (DDI_FAILURE);
3628 }
3629 
3630 /*
3631  * Frees tx/rx queues memory resources
3632  */
3633 static void
3634 ql_free_mem_resources(qlge_t *qlge)
3635 {
3636 	int i;
3637 
3638 	if (qlge->ricb_dma.dma_handle) {
3639 		/* free the ricb struct */
3640 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3641 		    &qlge->ricb_dma.acc_handle);
3642 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3643 	}
3644 
3645 	ql_free_rx_buffers(qlge);
3646 
3647 	ql_free_ioctl_dma_buf(qlge);
3648 
3649 	for (i = 0; i < qlge->tx_ring_count; i++)
3650 		ql_free_tx_resources(&qlge->tx_ring[i]);
3651 
3652 	for (i = 0; i < qlge->rx_ring_count; i++)
3653 		ql_free_rx_resources(&qlge->rx_ring[i]);
3654 
3655 	ql_free_shadow_space(qlge);
3656 }
3657 
3658 /*
3659  * Allocate buffer queues, large buffers, small buffers, etc.
3660  *
3661  * This API is called from the gld_attach entry point and is called only
3662  * once.  Subsequent resets or reboots must not re-allocate the rings and
3663  * buffers.
3664  */
3665 static int
3666 ql_alloc_mem_resources(qlge_t *qlge)
3667 {
3668 	int i;
3669 	ddi_dma_cookie_t dma_cookie;
3670 
3671 	/* Allocate space for our shadow registers */
3672 	if (ql_alloc_shadow_space(qlge))
3673 		return (DDI_FAILURE);
3674 
3675 	for (i = 0; i < qlge->rx_ring_count; i++) {
3676 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3677 			cmn_err(CE_WARN, "RX resource allocation failed.");
3678 			goto err_mem;
3679 		}
3680 	}
3681 	/* Allocate tx queue resources */
3682 	for (i = 0; i < qlge->tx_ring_count; i++) {
3683 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3684 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3685 			goto err_mem;
3686 		}
3687 	}
3688 
3689 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3690 		goto err_mem;
3691 	}
3692 
3693 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3694 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3695 		    __func__, qlge->instance);
3696 		goto err_mem;
3697 	}
3698 
3699 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3700 
3701 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3702 	    &ql_desc_acc_attr,
3703 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3704 	    &qlge->ricb_dma.acc_handle,
3705 	    (size_t)sizeof (struct ricb),  /* mem size */
3706 	    (size_t)0, /* alignment: default */
3707 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3708 	    &dma_cookie) != 0) {
3709 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3710 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3711 		    __func__, qlge->instance);
3712 		return (DDI_FAILURE);
3713 	}
3714 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3715 
3716 	return (DDI_SUCCESS);
3717 
3718 err_mem:
3719 	return (DDI_FAILURE);
3720 }
3721 
3722 
3723 /*
3724  * Function used to allocate physical memory and zero it.
3725  */
3726 
3727 static int
3728 ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3729     ddi_device_acc_attr_t *device_acc_attr,
3730     uint_t dma_flags,
3731     ddi_acc_handle_t *acc_handle,
3732     size_t size,
3733     size_t alignment,
3734     caddr_t *vaddr,
3735     ddi_dma_cookie_t *dma_cookie)
3736 {
3737 	size_t rlen;
3738 	uint_t cnt;
3739 
3740 	/*
3741 	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
3742 	 * boundary, otherwise the hardware will overrun the buffer.  The
3743 	 * simple fix is to round the size up so there is room for the overrun.
3744 	 */
3745 	if (size & 7) {
3746 		size += 8 - (size & 7);
3747 	}
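
	/*
	 * Illustrative example (not in the original source): a request for
	 * 1514 bytes has (1514 & 7) == 2, so 6 pad bytes are added and the
	 * allocation is rounded up to 1520 bytes.
	 */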
3748 
3749 	/* Adjust the alignment if requested */
3750 	if (alignment) {
3751 		dma_attr_rbuf.dma_attr_align = alignment;
3752 	}
3753 
3754 	/*
3755 	 * Allocate DMA handle
3756 	 */
3757 	if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3758 	    dma_handle) != DDI_SUCCESS) {
3759 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3760 		    __func__);
3761 		return (QL_ERROR);
3762 	}
3763 	/*
3764 	 * Allocate DMA memory
3765 	 */
3766 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3767 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3768 	    DDI_DMA_DONTWAIT,
3769 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3770 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3771 		ddi_dma_free_handle(dma_handle);
3772 		return (QL_ERROR);
3773 	}
3774 
3775 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3776 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3777 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3778 		ddi_dma_mem_free(acc_handle);
3779 
3780 		ddi_dma_free_handle(dma_handle);
3781 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3782 		    __func__);
3783 		return (QL_ERROR);
3784 	}
3785 
3786 	if (cnt != 1) {
3787 
3788 		ql_free_phys(dma_handle, acc_handle);
3789 
3790 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3791 		    __func__);
3792 		return (QL_ERROR);
3793 	}
3794 
3795 	bzero((caddr_t)*vaddr, rlen);
3796 
3797 	return (0);
3798 }
3799 
3800 /*
3801  * Function used to allocate physical memory and zero it.
3802  */
3803 static int
3804 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3805     ddi_device_acc_attr_t *device_acc_attr,
3806     uint_t dma_flags,
3807     ddi_acc_handle_t *acc_handle,
3808     size_t size,
3809     size_t alignment,
3810     caddr_t *vaddr,
3811     ddi_dma_cookie_t *dma_cookie)
3812 {
3813 	size_t rlen;
3814 	uint_t cnt;
3815 
3816 	/*
3817 	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
3818 	 * boundary, otherwise the hardware will overrun the buffer.  The
3819 	 * simple fix is to round the size up so there is room for the overrun.
3820 	 */
3821 	if (size & 7) {
3822 		size += 8 - (size & 7);
3823 	}
3824 
3825 	/* Adjust the alignment if requested */
3826 	if (alignment) {
3827 		dma_attr.dma_attr_align = alignment;
3828 	}
3829 
3830 	/*
3831 	 * Allocate DMA handle
3832 	 */
3833 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3834 	    dma_handle) != DDI_SUCCESS) {
3835 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3836 		    __func__);
3837 		return (QL_ERROR);
3838 	}
3839 	/*
3840 	 * Allocate DMA memory
3841 	 */
3842 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3843 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3844 	    DDI_DMA_DONTWAIT,
3845 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3846 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3847 		ddi_dma_free_handle(dma_handle);
3848 		return (QL_ERROR);
3849 	}
3850 
3851 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3852 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3853 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3854 		ddi_dma_mem_free(acc_handle);
3855 
3856 		ddi_dma_free_handle(dma_handle);
3857 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3858 		    __func__);
3859 		return (QL_ERROR);
3860 	}
3861 
3862 	if (cnt != 1) {
3863 
3864 		ql_free_phys(dma_handle, acc_handle);
3865 
3866 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3867 		    __func__);
3868 		return (QL_ERROR);
3869 	}
3870 
3871 	bzero((caddr_t)*vaddr, rlen);
3872 
3873 	return (0);
3874 }
3875 
3876 /*
3877  * Add interrupt handlers based on the interrupt type.
3878  * Before adding the interrupt handlers, the interrupt vectors and the
3879  * rx/tx rings must already have been allocated.
3880  */
3881 static int
3882 ql_add_intr_handlers(qlge_t *qlge)
3883 {
3884 	int vector = 0;
3885 	int rc, i;
3886 	uint32_t value = 0;	/* initial irq_cnt for every interrupt type */
3887 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3888 
3889 	switch (qlge->intr_type) {
3890 	case DDI_INTR_TYPE_MSIX:
3891 		/*
3892 		 * Add interrupt handler for rx and tx rings: vector[0 -
3893 		 * (qlge->intr_cnt -1)].
3894 		 */
3895 		value = 0;
3896 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3897 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3898 
3899 			/*
3900 			 * associate interrupt vector with interrupt handler
3901 			 */
3902 			rc = ddi_intr_add_handler(qlge->htable[vector],
3903 			    (ddi_intr_handler_t *)intr_ctx->handler,
3904 			    (void *)&qlge->rx_ring[vector], NULL);
3905 
3906 			QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3907 			    vector, &qlge->rx_ring[vector]));
3908 			if (rc != DDI_SUCCESS) {
3909 				QL_PRINT(DBG_INIT,
3910 				    ("Add rx interrupt handler failed. "
3911 				    "return: %d, vector: %d", rc, vector));
3912 				for (vector--; vector >= 0; vector--) {
3913 					(void) ddi_intr_remove_handler(
3914 					    qlge->htable[vector]);
3915 				}
3916 				return (DDI_FAILURE);
3917 			}
3918 			intr_ctx++;
3919 		}
3920 		break;
3921 
3922 	case DDI_INTR_TYPE_MSI:
3923 		/*
3924 		 * Add interrupt handlers for the only vector
3925 		 */
3926 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3927 
3928 		rc = ddi_intr_add_handler(qlge->htable[vector],
3929 		    ql_isr,
3930 		    (caddr_t)&qlge->rx_ring[0], NULL);
3931 
3932 		if (rc != DDI_SUCCESS) {
3933 			QL_PRINT(DBG_INIT,
3934 			    ("Add MSI interrupt handler failed: %d\n", rc));
3935 			return (DDI_FAILURE);
3936 		}
3937 		break;
3938 
3939 	case DDI_INTR_TYPE_FIXED:
3940 		/*
3941 		 * Add interrupt handlers for the only vector
3942 		 */
3943 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3944 
3945 		rc = ddi_intr_add_handler(qlge->htable[vector],
3946 		    ql_isr,
3947 		    (caddr_t)&qlge->rx_ring[0], NULL);
3948 
3949 		if (rc != DDI_SUCCESS) {
3950 			QL_PRINT(DBG_INIT,
3951 			    ("Add legacy interrupt handler failed: %d\n", rc));
3952 			return (DDI_FAILURE);
3953 		}
3954 		break;
3955 
3956 	default:
3957 		return (DDI_FAILURE);
3958 	}
3959 
3960 	/* Enable interrupts */
3961 	/* Block enable */
3962 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3963 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3964 		    qlge->intr_cnt));
3965 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3966 	} else { /* Non block enable */
3967 		for (i = 0; i < qlge->intr_cnt; i++) {
3968 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3969 			    "handle 0x%x\n", i, qlge->htable[i]));
3970 			(void) ddi_intr_enable(qlge->htable[i]);
3971 		}
3972 	}
3973 	qlge->sequence |= INIT_INTR_ENABLED;
3974 
3975 	return (DDI_SUCCESS);
3976 }
3977 
3978 /*
3979  * Here we build the intr_ctx structures based on
3980  * our rx_ring count and intr vector count.
3981  * The intr_ctx structure is used to hook each vector
3982  * to possibly different handlers.
3983  */
3984 static void
3985 ql_resolve_queues_to_irqs(qlge_t *qlge)
3986 {
3987 	int i = 0;
3988 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3989 
3990 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3991 		/*
3992 		 * Each rx_ring has its own intr_ctx since we
3993 		 * have separate vectors for each queue.
3994 		 * This is only true when MSI-X is enabled.
3995 		 */
3996 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3997 			qlge->rx_ring[i].irq = i;
3998 			intr_ctx->intr = i;
3999 			intr_ctx->qlge = qlge;
4000 
4001 			/*
4002 			 * We set up each vector's enable/disable/read bits so
4003 			 * there's no bit/mask calculation in the critical path.
4004 			 */
4005 			intr_ctx->intr_en_mask =
4006 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4007 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4008 			    INTR_EN_IHD | i;
4009 			intr_ctx->intr_dis_mask =
4010 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4011 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4012 			    INTR_EN_IHD | i;
4013 			intr_ctx->intr_read_mask =
4014 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4015 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4016 			    | i;
4017 
4018 			if (i == 0) {
4019 				/*
4020 				 * Default queue handles bcast/mcast plus
4021 				 * async events.
4022 				 */
4023 				intr_ctx->handler = ql_isr;
4024 			} else if (qlge->rx_ring[i].type == TX_Q) {
4025 				/*
4026 				 * Outbound queue is for outbound completions
4027 				 * only.
4028 				 */
4029 				if (qlge->isr_stride)
4030 					intr_ctx->handler = ql_msix_isr;
4031 				else
4032 					intr_ctx->handler = ql_msix_tx_isr;
4033 			} else {
4034 				/*
4035 				 * Inbound queues handle unicast frames only.
4036 				 */
4037 				if (qlge->isr_stride)
4038 					intr_ctx->handler = ql_msix_isr;
4039 				else
4040 					intr_ctx->handler = ql_msix_rx_isr;
4041 			}
4042 		}
4043 		i = qlge->intr_cnt;
4044 		for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
4045 			int iv = i - qlge->isr_stride;
4046 			qlge->rx_ring[i].irq = iv;
4047 			intr_ctx->intr = iv;
4048 			intr_ctx->qlge = qlge;
4049 
4050 			/*
4051 			 * We set up each vector's enable/disable/read bits so
4052 			 * there's no bit/mask calculation in the critical path.
4053 			 */
4054 			intr_ctx->intr_en_mask =
4055 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4056 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
4057 			    INTR_EN_IHD | iv;
4058 			intr_ctx->intr_dis_mask =
4059 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4060 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
4061 			    INTR_EN_IHD | iv;
4062 			intr_ctx->intr_read_mask =
4063 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4064 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
4065 			    | iv;
4066 
4067 			if (qlge->rx_ring[i].type == TX_Q) {
4068 				/*
4069 				 * Outbound queue is for outbound completions
4070 				 * only.
4071 				 */
4072 				intr_ctx->handler = ql_msix_isr;
4073 			} else {
4074 				/*
4075 				 * Inbound queues handle unicast frames only.
4076 				 */
4077 				intr_ctx->handler = ql_msix_rx_isr;
4078 			}
4079 		}
4080 	} else {
4081 		/*
4082 		 * All rx_rings use the same intr_ctx since
4083 		 * there is only one vector.
4084 		 */
4085 		intr_ctx->intr = 0;
4086 		intr_ctx->qlge = qlge;
4087 		/*
4088 		 * We set up each vector's enable/disable/read bits so
4089 		 * there's no bit/mask calculation in the critical path.
4090 		 */
4091 		intr_ctx->intr_en_mask =
4092 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4093 		    INTR_EN_TYPE_ENABLE;
4094 		intr_ctx->intr_dis_mask =
4095 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4096 		    INTR_EN_TYPE_DISABLE;
4097 		intr_ctx->intr_read_mask =
4098 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4099 		    INTR_EN_TYPE_READ;
4100 		/*
4101 		 * Single interrupt means one handler for all rings.
4102 		 */
4103 		intr_ctx->handler = ql_isr;
4104 		for (i = 0; i < qlge->rx_ring_count; i++)
4105 			qlge->rx_ring[i].irq = 0;
4106 	}
4107 }
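
/*
 * Illustrative summary of the resulting MSI-X mapping (not additional
 * driver logic): vector 0 is the default completion queue and uses ql_isr
 * for broadcast/multicast and async events; vectors bound to TX_Q rings use
 * ql_msix_tx_isr and the remaining inbound (rss) rings use ql_msix_rx_isr.
 * When isr_stride is non-zero, ql_msix_isr is used instead and each vector
 * also services the ring isr_stride positions above its own.
 */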
4108 
4109 
4110 /*
4111  * Free allocated interrupts.
4112  */
4113 static void
4114 ql_free_irq_vectors(qlge_t *qlge)
4115 {
4116 	int i;
4117 	int rc;
4118 
4119 	if (qlge->sequence & INIT_INTR_ENABLED) {
4120 		/* Disable all interrupts */
4121 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4122 			/* Call ddi_intr_block_disable() */
4123 			(void) ddi_intr_block_disable(qlge->htable,
4124 			    qlge->intr_cnt);
4125 		} else {
4126 			for (i = 0; i < qlge->intr_cnt; i++) {
4127 				(void) ddi_intr_disable(qlge->htable[i]);
4128 			}
4129 		}
4130 
4131 		qlge->sequence &= ~INIT_INTR_ENABLED;
4132 	}
4133 
4134 	for (i = 0; i < qlge->intr_cnt; i++) {
4135 
4136 		if (qlge->sequence & INIT_ADD_INTERRUPT)
4137 			(void) ddi_intr_remove_handler(qlge->htable[i]);
4138 
4139 		if (qlge->sequence & INIT_INTR_ALLOC) {
4140 			rc = ddi_intr_free(qlge->htable[i]);
4141 			if (rc != DDI_SUCCESS) {
4142 				/* EMPTY */
4143 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4144 				    rc));
4145 			}
4146 		}
4147 	}
4148 	if (qlge->sequence & INIT_INTR_ALLOC)
4149 		qlge->sequence &= ~INIT_INTR_ALLOC;
4150 
4151 	if (qlge->sequence & INIT_ADD_INTERRUPT)
4152 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4153 
4154 	if (qlge->htable) {
4155 		kmem_free(qlge->htable, qlge->intr_size);
4156 		qlge->htable = NULL;
4157 	}
4158 }
4159 
4160 /*
4161  * Allocate interrupt vectors
4162  * For legacy and MSI, only 1 handle is needed.
4163  * For MSI-X, if fewer than 2 vectors are available, return failure.
4164  * Upon success, this maps the vectors to rx and tx rings for
4165  * interrupts.
4166  */
4167 static int
4168 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4169 {
4170 	dev_info_t *devinfo;
4171 	uint32_t request, orig;
4172 	int count, avail, actual;
4173 	int minimum;
4174 	int rc;
4175 
4176 	devinfo = qlge->dip;
4177 
4178 	switch (intr_type) {
4179 	case DDI_INTR_TYPE_FIXED:
4180 		request = 1;	/* Request 1 legacy interrupt handle */
4181 		minimum = 1;
4182 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4183 		break;
4184 
4185 	case DDI_INTR_TYPE_MSI:
4186 		request = 1;	/* Request 1 MSI interrupt handle */
4187 		minimum = 1;
4188 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4189 		break;
4190 
4191 	case DDI_INTR_TYPE_MSIX:
4192 		/*
4193 		 * Ideal number of vectors for the adapter is
4194 		 * # rss rings + tx completion rings for default completion
4195 		 * queue.
4196 		 */
4197 		request = qlge->rx_ring_count;
4198 
4199 		orig = request;
4200 		if (request > (MAX_RX_RINGS))
4201 			request = MAX_RX_RINGS;
4202 		minimum = 2;
4203 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4204 		break;
4205 
4206 	default:
4207 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4208 		return (DDI_FAILURE);
4209 	}
4210 
4211 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
4212 	    request, minimum));
4213 
4214 	/*
4215 	 * Get number of supported interrupts
4216 	 */
4217 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4218 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4219 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4220 		    "count: %d\n", rc, count));
4221 		return (DDI_FAILURE);
4222 	}
4223 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4224 
4225 	/*
4226 	 * Get number of available interrupts
4227 	 */
4228 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4229 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4230 		QL_PRINT(DBG_INIT,
4231 		    ("Get interrupt available number failed. Return:"
4232 		    " %d, available: %d\n", rc, avail));
4233 		return (DDI_FAILURE);
4234 	}
4235 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4236 
4237 	if (avail < request) {
4238 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4239 		    request, avail));
4240 		request = avail;
4241 	}
4242 
4243 	actual = 0;
4244 	qlge->intr_cnt = 0;
4245 
4246 	/*
4247 	 * Allocate an array of interrupt handles
4248 	 */
4249 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4250 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4251 
4252 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4253 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4254 	if (rc != DDI_SUCCESS) {
4255 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4256 		    " %d, request: %d, actual: %d",
4257 		    __func__, qlge->instance, rc, request, actual);
4258 		goto ql_intr_alloc_fail;
4259 	}
4260 	qlge->intr_cnt = actual;
4261 
4262 	qlge->sequence |= INIT_INTR_ALLOC;
4263 
4264 	/*
4265 	 * If the actual number of vectors is less than the minimum,
4266 	 * then fail.
4267 	 */
4268 	if (actual < minimum) {
4269 		cmn_err(CE_WARN,
4270 		    "Insufficient interrupt handles available: %d", actual);
4271 		goto ql_intr_alloc_fail;
4272 	}
4273 
4274 	/*
4275 	 * For MSI-X, the actual vector count may force fewer tx & rx rings
4276 	 */
4277 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
4278 		if (actual >= (orig / 2)) {
4279 			count = orig / 2;
4280 			qlge->rss_ring_count = count;
4281 			qlge->tx_ring_count = count;
4282 			qlge->isr_stride = count;
4283 		} else if (actual >= (orig / 4)) {
4284 			count = orig / 4;
4285 			qlge->rss_ring_count = count;
4286 			qlge->tx_ring_count = count;
4287 			qlge->isr_stride = count;
4288 		} else if (actual >= (orig / 8)) {
4289 			count = orig / 8;
4290 			qlge->rss_ring_count = count;
4291 			qlge->tx_ring_count = count;
4292 			qlge->isr_stride = count;
4293 		} else if (actual < MAX_RX_RINGS) {
4294 			qlge->tx_ring_count = 1;
4295 			qlge->rss_ring_count = actual - 1;
4296 		}
4297 		qlge->intr_cnt = count;
4298 		qlge->rx_ring_count = qlge->tx_ring_count +
4299 		    qlge->rss_ring_count;
4300 	}
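	/*
	 * Worked example (illustrative only): if 8 vectors were requested
	 * (orig == 8) but only 5 were granted, actual >= orig/2, so count
	 * becomes 4.  The driver then runs 4 rss rings plus 4 tx completion
	 * rings (rx_ring_count == 8) on 4 vectors, with isr_stride == 4 so
	 * the upper rings share the handlers of the lower vectors.
	 */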
4301 	cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4302 	    qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4303 
4304 	/*
4305 	 * Get priority for first vector, assume remaining are all the same
4306 	 */
4307 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4308 	if (rc != DDI_SUCCESS) {
4309 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4310 		goto ql_intr_alloc_fail;
4311 	}
4312 
4313 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4314 	if (rc != DDI_SUCCESS) {
4315 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4316 		goto ql_intr_alloc_fail;
4317 	}
4318 
4319 	qlge->intr_type = intr_type;
4320 
4321 	return (DDI_SUCCESS);
4322 
4323 ql_intr_alloc_fail:
4324 	ql_free_irq_vectors(qlge);
4325 
4326 	return (DDI_FAILURE);
4327 }
4328 
4329 /*
4330  * Allocate interrupt vector(s) for one of the following interrupt types:
4331  * MSI-X, MSI or legacy (fixed). In MSI and legacy modes we support only a
4332  * single receive and a single transmit completion queue.
4333  */
4334 int
4335 ql_alloc_irqs(qlge_t *qlge)
4336 {
4337 	int intr_types;
4338 	int rval;
4339 
4340 	/*
4341 	 * Get supported interrupt types
4342 	 */
4343 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4344 	    != DDI_SUCCESS) {
4345 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4346 		    __func__, qlge->instance);
4347 
4348 		return (DDI_FAILURE);
4349 	}
4350 
4351 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4352 	    __func__, qlge->instance, intr_types));
4353 
4354 	/* Install MSI-X interrupts */
4355 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4356 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4357 		    __func__, qlge->instance, intr_types));
4358 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4359 		if (rval == DDI_SUCCESS) {
4360 			return (rval);
4361 		}
4362 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4363 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
4364 	}
4365 
4366 	/*
4367 	 * We will have 2 completion queues in MSI / Legacy mode,
4368 	 * Queue 0 for default completions
4369 	 * Queue 1 for transmit completions
4370 	 */
4371 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4372 	qlge->tx_ring_count = 1; /* Single tx completion queue */
4373 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4374 
4375 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4376 	    __func__, qlge->instance));
4377 	/*
4378 	 * Add the h/w interrupt handler and initialise mutexes
4379 	 */
4380 	rval = DDI_FAILURE;
4381 
4382 	/*
4383 	 * If the OS supports MSI-X but the allocation failed, try MSI.
4384 	 * If MSI allocation also fails, fall back to the fixed (legacy)
4385 	 * interrupt.
4386 	 */
4387 	if (intr_types & DDI_INTR_TYPE_MSI) {
4388 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4389 		if (rval == DDI_SUCCESS) {
4390 			qlge->intr_type = DDI_INTR_TYPE_MSI;
4391 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4392 			    __func__, qlge->instance));
4393 		}
4394 	}
4395 
4396 	/* Try Fixed interrupt Legacy mode */
4397 	if (rval != DDI_SUCCESS) {
4398 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4399 		if (rval != DDI_SUCCESS) {
4400 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4401 			    "allocation failed",
4402 			    __func__, qlge->instance);
4403 		} else {
4404 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
4405 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4406 			    __func__, qlge->instance));
4407 		}
4408 	}
4409 
4410 	return (rval);
4411 }
4412 
4413 static void
4414 ql_free_rx_tx_locks(qlge_t *qlge)
4415 {
4416 	int i;
4417 	struct rx_ring *rx_ring;
4418 	struct tx_ring *tx_ring;
4419 
4420 	for (i = 0; i < qlge->tx_ring_count; i++) {
4421 		tx_ring = &qlge->tx_ring[i];
4422 		mutex_destroy(&tx_ring->tx_lock);
4423 	}
4424 
4425 	for (i = 0; i < qlge->rx_ring_count; i++) {
4426 		rx_ring = &qlge->rx_ring[i];
4427 		mutex_destroy(&rx_ring->rx_lock);
4428 		mutex_destroy(&rx_ring->sbq_lock);
4429 		mutex_destroy(&rx_ring->lbq_lock);
4430 	}
4431 }
4432 
4433 /*
4434  * Frees all resources allocated during attach.
4435  *
4436  * Input:
4437  * qlge = adapter state pointer; qlge->sequence holds the bits that
4438  * indicate which resources were allocated and must be freed.
4439  *
4440  * Context:
4441  * Kernel context.
4442  */
4443 static void
4444 ql_free_resources(qlge_t *qlge)
4445 {
4446 
4447 	/* Disable driver timer */
4448 	ql_stop_timer(qlge);
4449 
4450 	if (qlge->sequence & INIT_MAC_REGISTERED) {
4451 		(void) mac_unregister(qlge->mh);
4452 		qlge->sequence &= ~INIT_MAC_REGISTERED;
4453 	}
4454 
4455 	if (qlge->sequence & INIT_MAC_ALLOC) {
4456 		/* Nothing to do, macp is already freed */
4457 		qlge->sequence &= ~INIT_MAC_ALLOC;
4458 	}
4459 
4460 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4461 		pci_config_teardown(&qlge->pci_handle);
4462 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4463 	}
4464 
4465 	if (qlge->sequence & INIT_ADD_INTERRUPT) {
4466 		ql_free_irq_vectors(qlge);
4467 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4468 	}
4469 
4470 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4471 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4472 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4473 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4474 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4475 	}
4476 
4477 	if (qlge->sequence & INIT_KSTATS) {
4478 		ql_fini_kstats(qlge);
4479 		qlge->sequence &= ~INIT_KSTATS;
4480 	}
4481 
4482 	if (qlge->sequence & INIT_MUTEX) {
4483 		mutex_destroy(&qlge->gen_mutex);
4484 		mutex_destroy(&qlge->hw_mutex);
4485 		mutex_destroy(&qlge->mbx_mutex);
4486 		cv_destroy(&qlge->cv_mbx_intr);
4487 		qlge->sequence &= ~INIT_MUTEX;
4488 	}
4489 
4490 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4491 		ql_free_rx_tx_locks(qlge);
4492 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4493 	}
4494 
4495 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4496 		ql_free_mem_resources(qlge);
4497 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4498 	}
4499 
4500 	if (qlge->sequence & INIT_REGS_SETUP) {
4501 		ddi_regs_map_free(&qlge->dev_handle);
4502 		qlge->sequence &= ~INIT_REGS_SETUP;
4503 	}
4504 
4505 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4506 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4507 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4508 	}
4509 
4510 	/*
4511 	 * Free the flash FLT table that was allocated during attach
4512 	 */
4513 	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
4514 	    (qlge->flt.header.length != 0)) {
4515 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4516 		qlge->flt.ql_flt_entry_ptr = NULL;
4517 	}
4518 
4519 	if (qlge->sequence & INIT_FM) {
4520 		ql_fm_fini(qlge);
4521 		qlge->sequence &= ~INIT_FM;
4522 	}
4523 
4524 	ddi_prop_remove_all(qlge->dip);
4525 	ddi_set_driver_private(qlge->dip, NULL);
4526 
4527 	/* finally, free qlge structure */
4528 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4529 		kmem_free(qlge, sizeof (qlge_t));
4530 	}
4531 }
4532 
4533 /*
4534  * Set promiscuous mode of the driver.
4535  * Caller must hold HW_LOCK.
4536  */
4537 void
4538 ql_set_promiscuous(qlge_t *qlge, int mode)
4539 {
4540 	if (mode) {
4541 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4542 		    RT_IDX_VALID, 1);
4543 	} else {
4544 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4545 		    RT_IDX_VALID, 0);
4546 	}
4547 }
4548 /*
4549  * Write 'data1' to Mac Protocol Address Index Register and
4550  * 'data2' to Mac Protocol Address Data Register
4551  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4552  */
4553 static int
4554 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4555 {
4556 	int return_value = DDI_SUCCESS;
4557 
4558 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4559 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4560 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4561 		    "timeout.");
4562 		return_value = DDI_FAILURE;
4563 		goto out;
4564 	}
4565 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4566 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4567 out:
4568 	return (return_value);
4569 }
4570 /*
4571  * Enable the 'index'ed multicast address in the host memory's multicast_list
4572  */
4573 int
4574 ql_add_multicast_address(qlge_t *qlge, int index)
4575 {
4576 	int rtn_val = DDI_FAILURE;
4577 	uint32_t offset;
4578 	uint32_t value1, value2;
4579 
4580 	/* Acquire the required semaphore */
4581 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4582 		return (rtn_val);
4583 	}
4584 
4585 	/* Program Offset0 - lower 32 bits of the MAC address */
4586 	offset = 0;
4587 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4588 	    (index << 4) | offset;
4589 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4590 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4591 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4592 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4593 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4594 		goto out;
4595 
4596 	/* Program offset1: upper 16 bits of the MAC address */
4597 	offset = 1;
4598 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4599 	    (index<<4) | offset;
4600 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4601 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4602 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4603 		goto out;
4604 	}
4605 	rtn_val = DDI_SUCCESS;
4606 out:
4607 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4608 	return (rtn_val);
4609 }
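
/*
 * Illustrative example (values are hypothetical): for the multicast address
 * 01:00:5e:00:00:01 stored at index 0, the two writes above program
 *	offset 0: value2 = 0x5e000001 (lower four octets)
 *	offset 1: value2 = 0x00000100 (upper two octets)
 * while value1 carries the enable bit, the multicast type, (index << 4) and
 * the offset.
 */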
4610 
4611 /*
4612  * Disable the 'index'ed multicast address in the host memory's multicast_list
4613  */
4614 int
4615 ql_remove_multicast_address(qlge_t *qlge, int index)
4616 {
4617 	int rtn_val = DDI_FAILURE;
4618 	uint32_t offset;
4619 	uint32_t value1, value2;
4620 
4621 	/* Acquire the required semaphore */
4622 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4623 		return (rtn_val);
4624 	}
4625 	/* Program Offset0 - lower 32 bits of the MAC address */
4626 	offset = 0;
4627 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4628 	value2 =
4629 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4630 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4631 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4632 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4633 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4634 		goto out;
4635 	}
4636 	/* Program offset1: upper 16 bits of the MAC address */
4637 	offset = 1;
4638 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4639 	value2 = 0;
4640 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4641 		goto out;
4642 	}
4643 	rtn_val = DDI_SUCCESS;
4644 out:
4645 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4646 	return (rtn_val);
4647 }
4648 
4649 /*
4650  * Add a new multicast address to the list of supported multicast addresses.
4651  * This API is called after the OS calls gld_set_multicast (GLDv2)
4652  * or m_multicst (GLDv3).
4653  *
4654  * Restriction:
4655  * The maximum number of multicast addresses is limited by the hardware.
4656  */
4657 int
4658 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4659 {
4660 	uint32_t index = qlge->multicast_list_count;
4661 	int rval = DDI_SUCCESS;
4662 	int status;
4663 
4664 	if ((ep[0] & 01) == 0) {
4665 		rval = EINVAL;
4666 		goto exit;
4667 	}
4668 
4669 	/* if there is available space in multicast_list, then add it */
4670 	if (index < MAX_MULTICAST_LIST_SIZE) {
4671 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4672 		    ETHERADDRL);
4673 		/* increment the total number of addresses in multicast list */
4674 		(void) ql_add_multicast_address(qlge, index);
4675 		qlge->multicast_list_count++;
4676 		QL_PRINT(DBG_GLD,
4677 		    ("%s(%d): added to index of multicast list= 0x%x, "
4678 		    "total %d\n", __func__, qlge->instance, index,
4679 		    qlge->multicast_list_count));
4680 
4681 		if (index > MAX_MULTICAST_HW_SIZE) {
4682 			if (!qlge->multicast_promisc) {
4683 				status = ql_set_routing_reg(qlge,
4684 				    RT_IDX_ALLMULTI_SLOT,
4685 				    RT_IDX_MCAST, 1);
4686 				if (status) {
4687 					cmn_err(CE_WARN,
4688 					    "Failed to init routing reg "
4689 					    "for mcast promisc mode.");
4690 					rval = ENOENT;
4691 					goto exit;
4692 				}
4693 				qlge->multicast_promisc = B_TRUE;
4694 			}
4695 		}
4696 	} else {
4697 		rval = ENOENT;
4698 	}
4699 exit:
4700 	return (rval);
4701 }
4702 
4703 /*
4704  * Remove an old multicast address from the list of supported multicast
4705  * addresses. This API is called after the OS calls gld_set_multicast (GLDv2)
4706  * or m_multicst (GLDv3).
4707  * The maximum number of multicast addresses is limited by the hardware.
4708  */
4709 int
4710 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4711 {
4712 	uint32_t total = qlge->multicast_list_count;
4713 	int i = 0;
4714 	int rmv_index = 0;
4715 	size_t length = sizeof (ql_multicast_addr);
4716 	int status;
4717 
4718 	for (i = 0; i < total; i++) {
4719 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4720 			continue;
4721 		}
4722 
4723 		rmv_index = i;
4724 		/* block move the rest of the multicast addresses forward */
4725 		length = ((total - 1) - i) * sizeof (ql_multicast_addr);
4726 		if (length > 0) {
4727 			bcopy(&qlge->multicast_list[i+1],
4728 			    &qlge->multicast_list[i], length);
4729 		}
4730 		qlge->multicast_list_count--;
4731 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4732 			/*
4733 			 * there is a deletion in multicast list table,
4734 			 * re-enable them
4735 			 */
4736 			for (i = rmv_index; i < qlge->multicast_list_count;
4737 			    i++) {
4738 				(void) ql_add_multicast_address(qlge, i);
4739 			}
4740 			/* and disable the last one */
4741 			(void) ql_remove_multicast_address(qlge, i);
4742 
4743 			/* disable multicast promiscuous mode */
4744 			if (qlge->multicast_promisc) {
4745 				status = ql_set_routing_reg(qlge,
4746 				    RT_IDX_ALLMULTI_SLOT,
4747 				    RT_IDX_MCAST, 0);
4748 				if (status) {
4749 					cmn_err(CE_WARN,
4750 					    "Failed to init routing reg for "
4751 					    "mcast promisc mode.");
4752 					goto exit;
4753 				}
4754 				/* write to config register */
4755 				qlge->multicast_promisc = B_FALSE;
4756 			}
4757 		}
4758 		break;
4759 	}
4760 exit:
4761 	return (DDI_SUCCESS);
4762 }
4763 
4764 /*
4765  * Read an XGMAC register
4766  */
4767 int
4768 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4769 {
4770 	int rtn_val = DDI_FAILURE;
4771 
4772 	/* wait for XGMAC Address register RDY bit set */
4773 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4774 	    BIT_SET, 10) != DDI_SUCCESS) {
4775 		goto out;
4776 	}
4777 	/* start rx transaction */
4778 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4779 
4780 	/*
4781 	 * wait for XGMAC Address register RDY bit set,
4782 	 * which indicates data is ready
4783 	 */
4784 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4785 	    BIT_SET, 10) != DDI_SUCCESS) {
4786 		goto out;
4787 	}
4788 	/* read data from the XGMAC_DATA register */
4789 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4790 	rtn_val = DDI_SUCCESS;
4791 out:
4792 	return (rtn_val);
4793 }
4794 
4795 /*
4796  * Implement checksum offload for IPv4 IP packets
4797  */
4798 static void
4799 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4800     struct ob_mac_iocb_req *mac_iocb_ptr)
4801 {
4802 	struct ip *iphdr = NULL;
4803 	struct ether_header *ethhdr;
4804 	struct ether_vlan_header *ethvhdr;
4805 	struct tcphdr *tcp_hdr;
4806 	uint32_t etherType;
4807 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4808 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4809 
4810 	ethhdr  = (struct ether_header *)((void *)bp);
4811 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4812 	/* Is this a VLAN packet? */
4813 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4814 		mac_hdr_len = sizeof (struct ether_vlan_header);
4815 		etherType = ntohs(ethvhdr->ether_type);
4816 	} else {
4817 		mac_hdr_len = sizeof (struct ether_header);
4818 		etherType = ntohs(ethhdr->ether_type);
4819 	}
4820 	/* Is this IPv4 or IPv6 packet? */
4821 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4822 	    IPV4_VERSION) {
4823 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4824 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4825 		} else {
4826 			/* EMPTY */
4827 			QL_PRINT(DBG_TX,
4828 			    ("%s(%d) : IPv4 header, non-IP ether type 0x%x\n",
4829 			    __func__, qlge->instance, etherType));
4830 		}
4831 	}
4832 	/* ipV4 packets */
4833 	if (iphdr != NULL) {
4834 
4835 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4836 		QL_PRINT(DBG_TX,
4837 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4838 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4839 
4840 		ip_hdr_off = mac_hdr_len;
4841 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4842 		    __func__, qlge->instance, ip_hdr_len));
4843 
4844 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4845 		    OB_MAC_IOCB_REQ_IPv4);
4846 
4847 		if (pflags & HCK_IPV4_HDRCKSUM) {
4848 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4849 			    __func__, qlge->instance));
4850 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4851 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4852 			    OB_MAC_IOCB_REQ_IC);
4853 			iphdr->ip_sum = 0;
4854 			mac_iocb_ptr->hdr_off = (uint16_t)
4855 			    cpu_to_le16(ip_hdr_off);
4856 		}
4857 		if (pflags & HCK_FULLCKSUM) {
4858 			if (iphdr->ip_p == IPPROTO_TCP) {
4859 				tcp_hdr =
4860 				    (struct tcphdr *)(void *)
4861 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4862 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4863 				    __func__, qlge->instance));
4864 				mac_iocb_ptr->opcode =
4865 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4866 				mac_iocb_ptr->flag1 =
4867 				    (uint8_t)(mac_iocb_ptr->flag1 |
4868 				    OB_MAC_IOCB_REQ_TC);
4869 				mac_iocb_ptr->flag2 =
4870 				    (uint8_t)(mac_iocb_ptr->flag2 |
4871 				    OB_MAC_IOCB_REQ_IC);
4872 				iphdr->ip_sum = 0;
4873 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4874 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4875 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4876 				    __func__, qlge->instance, tcp_udp_hdr_len));
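				/*
				 * The two header offsets are packed into a
				 * single 16-bit field: the low 6 bits hold
				 * the IP header offset and the upper bits
				 * hold the TCP/UDP header offset.  E.g. for
				 * an untagged frame (illustrative values),
				 * ip_hdr_off = 14 and tcp_udp_hdr_off = 34,
				 * so hdr_off = 14 | (34 << 6) = 0x88e.
				 */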
4877 				hdr_off = ip_hdr_off;
4878 				tcp_udp_hdr_off <<= 6;
4879 				hdr_off |= tcp_udp_hdr_off;
4880 				mac_iocb_ptr->hdr_off =
4881 				    (uint16_t)cpu_to_le16(hdr_off);
4882 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4883 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4884 				    tcp_udp_hdr_len);
4885 
4886 				/*
4887 				 * if the chip is unable to do pseudo header
4888 				 * cksum calculation, do it here and put the
4889 				 * result into the data passed to the chip
4890 				 */
4891 				if (qlge->cfg_flags &
4892 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4893 					ql_pseudo_cksum((uint8_t *)iphdr);
4894 				}
4895 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4896 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4897 				    __func__, qlge->instance));
4898 				mac_iocb_ptr->opcode =
4899 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4900 				mac_iocb_ptr->flag1 =
4901 				    (uint8_t)(mac_iocb_ptr->flag1 |
4902 				    OB_MAC_IOCB_REQ_UC);
4903 				mac_iocb_ptr->flag2 =
4904 				    (uint8_t)(mac_iocb_ptr->flag2 |
4905 				    OB_MAC_IOCB_REQ_IC);
4906 				iphdr->ip_sum = 0;
4907 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4908 				tcp_udp_hdr_len = sizeof (struct udphdr);
4909 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4910 				    __func__, qlge->instance, tcp_udp_hdr_len));
4911 				hdr_off = ip_hdr_off;
4912 				tcp_udp_hdr_off <<= 6;
4913 				hdr_off |= tcp_udp_hdr_off;
4914 				mac_iocb_ptr->hdr_off =
4915 				    (uint16_t)cpu_to_le16(hdr_off);
4916 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4917 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4918 				    + tcp_udp_hdr_len);
4919 
4920 				/*
4921 				 * if the chip is unable to calculate pseudo
4922 				 * hdr cksum, do it here and put the result in
4923 				 * the data passed to the chip
4924 				 */
4925 				if (qlge->cfg_flags &
4926 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4927 					ql_pseudo_cksum((uint8_t *)iphdr);
4928 				}
4929 			}
4930 		}
4931 	}
4932 }
4933 
4934 /*
4935  * For TSO/LSO:
4936  * MAC frame transmission with TCP large segment offload is performed in the
4937  * same way as the MAC frame transmission with checksum offload with the
4938  * exception that the maximum TCP segment size (MSS) must be specified to
4939  * allow the chip to segment the data into legal sized frames.
4940  * The host also needs to calculate a pseudo-header checksum over the
4941  * following fields:
4942  * Source IP Address, Destination IP Address, and the Protocol.
4943  * The TCP length is not included in the pseudo-header calculation.
4944  * The pseudo-header checksum is placed in the TCP checksum field of the
4945  * prototype header.
4946  */
4947 static void
4948 ql_lso_pseudo_cksum(uint8_t *buf)
4949 {
4950 	uint32_t cksum;
4951 	uint16_t iphl;
4952 	uint16_t proto;
4953 
4954 	/*
4955 	 * Calculate the LSO pseudo-header checksum.
4956 	 */
4957 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4958 	cksum = proto = buf[9];
4959 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4960 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4961 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4962 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4963 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4964 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4965 
4966 	/*
4967 	 * Point it to the TCP/UDP header, and
4968 	 * update the checksum field.
4969 	 */
4970 	buf += iphl + ((proto == IPPROTO_TCP) ?
4971 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4972 
4973 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4974 }
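
/*
 * Worked example (illustrative only): for a TCP segment from 192.168.1.1 to
 * 192.168.1.2 the sum above is 6 + 0xc0a8 + 0x0101 + 0xc0a8 + 0x0102 =
 * 0x18359, which folds to 0x835a.  That value (not its one's complement) is
 * written into the TCP checksum field; the hardware is expected to add the
 * TCP length and finish the checksum when it segments the frame.
 */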
4975 
4976 /*
4977  * For IPv4 IP packets, distribute the tx packets evenly among tx rings
4978  */
4979 typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
4980 typedef	uint8_t		ub1;
4981 
4982 #define	hashsize(n)	((ub4)1<<(n))
4983 #define	hashmask(n)	(hashsize(n)-1)
4984 
4985 #define	mix(a, b, c) \
4986 { \
4987 	a -= b; a -= c; a ^= (c>>13); \
4988 	b -= c; b -= a; b ^= (a<<8); \
4989 	c -= a; c -= b; c ^= (b>>13); \
4990 	a -= b; a -= c; a ^= (c>>12);  \
4991 	b -= c; b -= a; b ^= (a<<16); \
4992 	c -= a; c -= b; c ^= (b>>5); \
4993 	a -= b; a -= c; a ^= (c>>3);  \
4994 	b -= c; b -= a; b ^= (a<<10); \
4995 	c -= a; c -= b; c ^= (b>>15); \
4996 }
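
/*
 * The function below appears to be Bob Jenkins' public-domain lookup hash:
 * it mixes the key 12 bytes at a time.  ql_tx_hashing() below feeds it a
 * 12-byte flow key (source IP, destination IP, source port, destination
 * port), roughly (sketch, not additional driver code):
 *
 *	h = hash(key, 12, 0);
 *	tx_ring_id = h & (qlge->tx_ring_count - 1);
 *
 * which assumes tx_ring_count is a power of two.
 */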
4997 
4998 ub4
4999 hash(
5000     ub1 *k,		/* the key */
5001     ub4 length,	/* the length of the key */
5002     ub4 initval)	/* the previous hash, or an arbitrary value */
5003 {
5004 	register ub4 a, b, c, len;
5005 
5006 	/* Set up the internal state */
5007 	len = length;
5008 	a = b = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
5009 	c = initval;		/* the previous hash value */
5010 
5011 	/* handle most of the key */
5012 	while (len >= 12) {
5013 		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
5014 		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
5015 		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
5016 		mix(a, b, c);
5017 		k += 12;
5018 		len -= 12;
5019 	}
5020 
5021 	/* handle the last 11 bytes */
5022 	c += length;
5023 	/* all the case statements fall through */
5024 	switch (len) {
5025 		/* FALLTHRU */
5026 	case 11: c += ((ub4)k[10]<<24);
5027 		/* FALLTHRU */
5028 	case 10: c += ((ub4)k[9]<<16);
5029 		/* FALLTHRU */
5030 	case 9 : c += ((ub4)k[8]<<8);
5031 	/* the first byte of c is reserved for the length */
5032 		/* FALLTHRU */
5033 	case 8 : b += ((ub4)k[7]<<24);
5034 		/* FALLTHRU */
5035 	case 7 : b += ((ub4)k[6]<<16);
5036 		/* FALLTHRU */
5037 	case 6 : b += ((ub4)k[5]<<8);
5038 		/* FALLTHRU */
5039 	case 5 : b += k[4];
5040 		/* FALLTHRU */
5041 	case 4 : a += ((ub4)k[3]<<24);
5042 		/* FALLTHRU */
5043 	case 3 : a += ((ub4)k[2]<<16);
5044 		/* FALLTHRU */
5045 	case 2 : a += ((ub4)k[1]<<8);
5046 		/* FALLTHRU */
5047 	case 1 : a += k[0];
5048 	/* case 0: nothing left to add */
5049 	}
5050 	mix(a, b, c);
5051 	/* report the result */
5052 	return (c);
5053 }
5054 
5055 uint8_t
5056 ql_tx_hashing(qlge_t *qlge, caddr_t bp)
5057 {
5058 	struct ip *iphdr = NULL;
5059 	struct ether_header *ethhdr;
5060 	struct ether_vlan_header *ethvhdr;
5061 	struct tcphdr *tcp_hdr;
5062 	struct udphdr *udp_hdr;
5063 	uint32_t etherType;
5064 	int mac_hdr_len, ip_hdr_len;
5065 	uint32_t h = 0; /* 0 by default */
5066 	uint8_t tx_ring_id = 0;
5067 	uint32_t ip_src_addr = 0;
5068 	uint32_t ip_desc_addr = 0;
5069 	uint16_t src_port = 0;
5070 	uint16_t dest_port = 0;
5071 	uint8_t key[12];
5072 	QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
5073 
5074 	ethhdr = (struct ether_header *)((void *)bp);
5075 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
5076 
5077 	if (qlge->tx_ring_count == 1)
5078 		return (tx_ring_id);
5079 
5080 	/* Is this a VLAN packet? */
5081 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5082 		mac_hdr_len = sizeof (struct ether_vlan_header);
5083 		etherType = ntohs(ethvhdr->ether_type);
5084 	} else {
5085 		mac_hdr_len = sizeof (struct ether_header);
5086 		etherType = ntohs(ethhdr->ether_type);
5087 	}
5088 	/* Is this IPv4 or IPv6 packet? */
5089 	if (etherType == ETHERTYPE_IP /* 0800 */) {
5090 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5091 		    == IPV4_VERSION) {
5092 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5093 		}
5094 		if (((unsigned long)iphdr) & 0x3) {
5095 			/*  IP hdr not 4-byte aligned */
5096 			return (tx_ring_id);
5097 		}
5098 	}
5099 	/* ipV4 packets */
5100 	if (iphdr) {
5101 
5102 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5103 		ip_src_addr = iphdr->ip_src.s_addr;
5104 		ip_desc_addr = iphdr->ip_dst.s_addr;
5105 
5106 		if (iphdr->ip_p == IPPROTO_TCP) {
5107 			tcp_hdr = (struct tcphdr *)(void *)
5108 			    ((uint8_t *)iphdr + ip_hdr_len);
5109 			src_port = tcp_hdr->th_sport;
5110 			dest_port = tcp_hdr->th_dport;
5111 		} else if (iphdr->ip_p == IPPROTO_UDP) {
5112 			udp_hdr = (struct udphdr *)(void *)
5113 			    ((uint8_t *)iphdr + ip_hdr_len);
5114 			src_port = udp_hdr->uh_sport;
5115 			dest_port = udp_hdr->uh_dport;
5116 		}
5117 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
5118 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5119 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5120 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5121 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5122 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5123 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5124 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5125 		key[8] = (uint8_t)((src_port) &0xFF);
5126 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
5127 		key[10] = (uint8_t)((dest_port) &0xFF);
5128 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5129 		h = hash(key, 12, 0); /* return 32 bit */
5130 		tx_ring_id = (h & (qlge->tx_ring_count - 1));
5131 		if (tx_ring_id >= qlge->tx_ring_count) {
5132 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5133 			    __func__, tx_ring_id);
5134 			tx_ring_id = 0;
5135 		}
5136 	}
5137 	return (tx_ring_id);
5138 }
5139 
5140 /*
5141  * Tell the hardware to do Large Send Offload (LSO)
5142  *
5143  * Some fields in the ob_mac_iocb need to be set so the hardware knows what
5144  * kind of packet is being sent (TCP or UDP), whether a VLAN tag needs to be
5145  * inserted, and where the protocol headers start, so that it can process
5146  * the packet correctly.
5147  */
5148 static void
5149 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5150     struct ob_mac_iocb_req *mac_iocb_ptr)
5151 {
5152 	struct ip *iphdr = NULL;
5153 	struct ether_header *ethhdr;
5154 	struct ether_vlan_header *ethvhdr;
5155 	struct tcphdr *tcp_hdr;
5156 	struct udphdr *udp_hdr;
5157 	uint32_t etherType;
5158 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5159 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5160 
5161 	ethhdr = (struct ether_header *)(void *)bp;
5162 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
5163 
5164 	/* Is this a VLAN packet? */
5165 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5166 		mac_hdr_len = sizeof (struct ether_vlan_header);
5167 		etherType = ntohs(ethvhdr->ether_type);
5168 	} else {
5169 		mac_hdr_len = sizeof (struct ether_header);
5170 		etherType = ntohs(ethhdr->ether_type);
5171 	}
5172 	/* Is this IPv4 or IPv6 packet? */
5173 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5174 	    IPV4_VERSION) {
5175 		if (etherType == ETHERTYPE_IP /* 0800 */) {
5176 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5177 		} else {
5178 			/* EMPTY */
5179 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 header, non-IP"
5180 			    " ether type 0x%x\n",
5181 			    __func__, qlge->instance, etherType));
5182 		}
5183 	}
5184 
5185 	if (iphdr != NULL) { /* ipV4 packets */
5186 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5187 		QL_PRINT(DBG_TX,
5188 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5189 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
5190 
5191 		ip_hdr_off = mac_hdr_len;
5192 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5193 		    __func__, qlge->instance, ip_hdr_len));
5194 
5195 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5196 		    OB_MAC_IOCB_REQ_IPv4);
5197 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5198 			if (iphdr->ip_p == IPPROTO_TCP) {
5199 				tcp_hdr = (struct tcphdr *)(void *)
5200 				    ((uint8_t *)(void *)iphdr +
5201 				    ip_hdr_len);
5202 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5203 				    "packet\n",
5204 				    __func__, qlge->instance));
5205 				mac_iocb_ptr->opcode =
5206 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5207 				mac_iocb_ptr->flag1 =
5208 				    (uint8_t)(mac_iocb_ptr->flag1 |
5209 				    OB_MAC_IOCB_REQ_LSO);
5210 				iphdr->ip_sum = 0;
5211 				tcp_udp_hdr_off =
5212 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5213 				tcp_udp_hdr_len =
5214 				    (uint16_t)(tcp_hdr->th_off*4);
5215 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5216 				    __func__, qlge->instance, tcp_udp_hdr_len));
5217 				hdr_off = ip_hdr_off;
5218 				tcp_udp_hdr_off <<= 6;
5219 				hdr_off |= tcp_udp_hdr_off;
5220 				mac_iocb_ptr->hdr_off =
5221 				    (uint16_t)cpu_to_le16(hdr_off);
5222 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5223 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5224 				    tcp_udp_hdr_len);
5225 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5226 
5227 				/*
5228 				 * if the chip is unable to calculate pseudo
5229 				 * header checksum, do it here and put the
5230 				 * result into the data passed to the chip
5231 				 */
5232 				if (qlge->cfg_flags &
5233 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5234 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5235 			} else if (iphdr->ip_p == IPPROTO_UDP) {
5236 				udp_hdr = (struct udphdr *)(void *)
5237 				    ((uint8_t *)(void *)iphdr
5238 				    + ip_hdr_len);
5239 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5240 				    "packet\n",
5241 				    __func__, qlge->instance));
5242 				mac_iocb_ptr->opcode =
5243 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5244 				mac_iocb_ptr->flag1 =
5245 				    (uint8_t)(mac_iocb_ptr->flag1 |
5246 				    OB_MAC_IOCB_REQ_LSO);
5247 				iphdr->ip_sum = 0;
5248 				tcp_udp_hdr_off =
5249 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5250 				tcp_udp_hdr_len =
5251 				    (uint16_t)sizeof (struct udphdr);
5252 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5253 				    __func__, qlge->instance, tcp_udp_hdr_len));
5254 				hdr_off = ip_hdr_off;
5255 				tcp_udp_hdr_off <<= 6;
5256 				hdr_off |= tcp_udp_hdr_off;
5257 				mac_iocb_ptr->hdr_off =
5258 				    (uint16_t)cpu_to_le16(hdr_off);
5259 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5260 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5261 				    tcp_udp_hdr_len);
5262 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5263 
5264 				/*
5265 				 * if the chip is unable to do pseudo header
5266 				 * checksum calculation, do it here and put the
5267 				 * result into the data passed to the chip
5268 				 */
5269 				if (qlge->cfg_flags &
5270 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5271 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5272 			}
5273 		}
5274 	}
5275 }
5276 
5277 /*
5278  * Generic packet-sending function used to transmit a single packet.
5279  */
5280 int
5281 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5282 {
5283 	struct tx_ring_desc *tx_cb;
5284 	struct ob_mac_iocb_req *mac_iocb_ptr;
5285 	mblk_t *tp;
5286 	size_t msg_len = 0;
5287 	size_t off;
5288 	caddr_t bp;
5289 	size_t nbyte, total_len;
5290 	uint_t i = 0;
5291 	int j = 0, frags = 0;
5292 	uint32_t phy_addr_low, phy_addr_high;
5293 	uint64_t phys_addr;
5294 	clock_t now;
5295 	uint32_t pflags = 0;
5296 	uint32_t mss = 0;
5297 	enum tx_mode_t tx_mode;
5298 	struct oal_entry *oal_entry;
5299 	int status;
5300 	uint_t ncookies, oal_entries, max_oal_entries;
5301 	size_t max_seg_len = 0;
5302 	boolean_t use_lso = B_FALSE;
5303 	struct oal_entry *tx_entry = NULL;
5304 	struct oal_entry *last_oal_entry;
5305 	qlge_t *qlge = tx_ring->qlge;
5306 	ddi_dma_cookie_t dma_cookie;
5307 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5308 	int force_pullup = 0;
5309 
5310 	tp = mp;
5311 	total_len = msg_len = 0;
5312 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5313 
5314 	/* Calculate number of data and segments in the incoming message */
5315 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
5316 		nbyte = MBLKL(tp);
5317 		total_len += nbyte;
5318 		max_seg_len = max(nbyte, max_seg_len);
5319 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5320 		    "total length: %d\n", frags, nbyte));
5321 		frags++;
5322 	}
5323 
5324 	if (total_len >= QL_LSO_MAX) {
5325 		freemsg(mp);
5326 #ifdef QLGE_LOAD_UNLOAD
5327 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5328 		    __func__, (int)total_len);
5329 #endif
5330 		return (0);
5331 	}
5332 
5333 	bp = (caddr_t)mp->b_rptr;
5334 	if (bp[0] & 1) {
5335 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5336 		    ETHERADDRL) == 0) {
5337 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5338 			tx_ring->brdcstxmt++;
5339 		} else {
5340 			QL_PRINT(DBG_TX, ("multicast packet\n"));
5341 			tx_ring->multixmt++;
5342 		}
5343 	}
5344 
5345 	tx_ring->obytes += total_len;
5346 	tx_ring->opackets++;
5347 
5348 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5349 	    " max seg len: %d\n", total_len, frags, max_seg_len));
5350 
5351 	/* claim a free slot in tx ring */
5352 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5353 
5354 	/* get the tx descriptor */
5355 	mac_iocb_ptr = tx_cb->queue_entry;
5356 
5357 	bzero((void *)mac_iocb_ptr, 20);
5358 
5359 	ASSERT(tx_cb->mp == NULL);
5360 
5361 	/*
5362 	 * Decide to use DMA map or copy mode.
5363 	 * DMA map mode must be used when the total msg length is more than the
5364 	 * tx buffer length.
5365 	 */
5366 
5367 	if (total_len > tx_buf_len)
5368 		tx_mode = USE_DMA;
5369 	else if (max_seg_len > QL_MAX_COPY_LENGTH)
5370 		tx_mode = USE_DMA;
5371 	else
5372 		tx_mode = USE_COPY;
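
	/*
	 * Example (QL_MAX_COPY_LENGTH is a build-time constant, typically a
	 * few hundred bytes): a small ARP reply fits in the pre-allocated
	 * copy buffer, while a full-sized 1514-byte frame is DMA-bound
	 * instead of being copied.
	 */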
5373 
5374 	if (qlge->chksum_cap) {
5375 		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5376 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5377 		    "is 0x%x \n", pflags, qlge->chksum_cap));
5378 		if (qlge->lso_enable) {
5379 			uint32_t lso_flags = 0;
5380 			mac_lso_get(mp, &mss, &lso_flags);
5381 			use_lso = (lso_flags == HW_LSO);
5382 		}
5383 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5384 		    mss, use_lso));
5385 	}
5386 
5387 do_pullup:
5388 
5389 	/* concatenate all frags into one large packet if too fragmented */
5390 	if (((tx_mode == USE_DMA) && (frags > QL_MAX_TX_DMA_HANDLES)) ||
5391 	    force_pullup) {
5392 		mblk_t *mp1;
5393 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
5394 			freemsg(mp);
5395 			mp = mp1;
5396 			frags = 1;
5397 		} else {
5398 			tx_ring->tx_fail_dma_bind++;
5399 			goto bad;
5400 		}
5401 	}
5402 
5403 	tx_cb->tx_bytes = (uint32_t)total_len;
5404 	tx_cb->mp = mp;
5405 	tx_cb->tx_dma_handle_used = 0;
5406 
5407 	if (tx_mode == USE_DMA) {
5408 		msg_len = total_len;
5409 
5410 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5411 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5412 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5413 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5414 
5415 		tx_entry = &mac_iocb_ptr->oal_entry[0];
5416 		oal_entry = NULL;
5417 
5418 		for (tp = mp, oal_entries = j = 0; tp != NULL;
5419 		    tp = tp->b_cont) {
5420 			/* if too many tx dma handles needed */
5421 			if (j >= QL_MAX_TX_DMA_HANDLES) {
5422 				tx_ring->tx_no_dma_handle++;
5423 				if (!force_pullup) {
5424 					force_pullup = 1;
5425 					goto do_pullup;
5426 				} else {
5427 					goto bad;
5428 				}
5429 			}
5430 			nbyte = (uint16_t)MBLKL(tp);
5431 			if (nbyte == 0)
5432 				continue;
5433 
5434 			status = ddi_dma_addr_bind_handle(
5435 			    tx_cb->tx_dma_handle[j], NULL,
5436 			    (caddr_t)tp->b_rptr, nbyte,
5437 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5438 			    0, &dma_cookie, &ncookies);
5439 
5440 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5441 			    "length: %d, spans in %d cookies\n",
5442 			    j, nbyte, ncookies));
5443 
5444 			if (status != DDI_DMA_MAPPED) {
5445 				goto bad;
5446 			}
5447 			/*
5448 			 * Each fragment can span several cookies. One cookie
5449 			 * will use one tx descriptor to transmit.
5450 			 */
5451 			for (i = ncookies; i > 0; i--, tx_entry++,
5452 			    oal_entries++) {
5453 				/*
5454 				 * The number of TX descriptors that can be
5455 				 *  saved in tx iocb and oal list is limited
5456 				 */
5457 				if (oal_entries > max_oal_entries) {
5458 					tx_ring->tx_no_dma_cookie++;
5459 					if (!force_pullup) {
5460 						force_pullup = 1;
5461 						goto do_pullup;
5462 					} else {
5463 						goto bad;
5464 					}
5465 				}
5466 
5467 				if ((oal_entries == TX_DESC_PER_IOCB) &&
5468 				    !oal_entry) {
5469 					/*
5470 					 * Time to switch to an oal list
5471 					 * The last entry should be copied
5472 					 * to first entry in the oal list
5473 					 */
5474 					oal_entry = tx_cb->oal;
5475 					tx_entry =
5476 					    &mac_iocb_ptr->oal_entry[
5477 					    TX_DESC_PER_IOCB-1];
5478 					bcopy(tx_entry, oal_entry,
5479 					    sizeof (*oal_entry));
5480 
5481 					/*
5482 					 * last entry should be updated to
5483 					 * point to the extended oal list itself
5484 					 */
5485 					tx_entry->buf_addr_low =
5486 					    cpu_to_le32(
5487 					    LS_64BITS(tx_cb->oal_dma_addr));
5488 					tx_entry->buf_addr_high =
5489 					    cpu_to_le32(
5490 					    MS_64BITS(tx_cb->oal_dma_addr));
5491 					/*
5492 					 * Point tx_entry to the oal list
5493 					 * second entry
5494 					 */
5495 					tx_entry = &oal_entry[1];
5496 				}
5497 
5498 				tx_entry->buf_len =
5499 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5500 				phys_addr = dma_cookie.dmac_laddress;
5501 				tx_entry->buf_addr_low =
5502 				    cpu_to_le32(LS_64BITS(phys_addr));
5503 				tx_entry->buf_addr_high =
5504 				    cpu_to_le32(MS_64BITS(phys_addr));
5505 
5506 				last_oal_entry = tx_entry;
5507 
5508 				if (i > 1)
5509 					ddi_dma_nextcookie(
5510 					    tx_cb->tx_dma_handle[j],
5511 					    &dma_cookie);
5512 			}
5513 			j++;
5514 		}
5515 		/*
5516 		 * if OAL is used, the last oal entry in tx iocb indicates
5517 		 * number of additional address/len pairs in OAL
5518 		 */
5519 		if (oal_entries > TX_DESC_PER_IOCB) {
5520 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
5521 			tx_entry->buf_len = (uint32_t)
5522 			    (cpu_to_le32((sizeof (struct oal_entry) *
5523 			    (oal_entries -TX_DESC_PER_IOCB+1))|OAL_CONT_ENTRY));
5524 		}
5525 		last_oal_entry->buf_len = cpu_to_le32(
5526 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5527 
5528 		tx_cb->tx_dma_handle_used = j;
5529 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5530 		    j, oal_entries));
5531 
5532 		bp = (caddr_t)mp->b_rptr;
5533 	}
5534 	if (tx_mode == USE_COPY) {
5535 		bp = tx_cb->copy_buffer;
5536 		off = 0;
5537 		nbyte = 0;
5538 		frags = 0;
5539 		/*
5540 		 * Copy up to tx_buf_len of the transmit data
5541 		 * from mp to tx buffer
5542 		 */
5543 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
5544 			nbyte = MBLKL(tp);
5545 			if ((off + nbyte) <= tx_buf_len) {
5546 				bcopy(tp->b_rptr, &bp[off], nbyte);
5547 				off += nbyte;
				frags++;
5549 			}
5550 		}
5551 
5552 		msg_len = off;
5553 
5554 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5555 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5556 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5557 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5558 
5559 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
5560 		    "from %d segaments\n", msg_len, frags));
5561 
5562 		phys_addr = tx_cb->copy_buffer_dma_addr;
5563 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5564 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5565 
5566 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
5567 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5568 
5569 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5570 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5571 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
5572 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5573 
		freemsg(mp); /* data copied; original no longer needed */
5575 		tx_cb->mp = NULL;
5576 	} /* End of Copy Mode */
5577 
5578 	/* Do TSO/LSO on TCP packet? */
5579 	if (use_lso && mss) {
5580 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5581 	} else if (pflags & qlge->chksum_cap) {
5582 		/* Do checksum offloading */
5583 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5584 	}
5585 
5586 	/* let device know the latest outbound IOCB */
5587 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5588 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5589 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5590 
5591 	if (tx_mode == USE_DMA) {
5592 		/* let device know the latest outbound OAL if necessary */
5593 		if (oal_entries > TX_DESC_PER_IOCB) {
5594 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5595 			    (off_t)0,
5596 			    (sizeof (struct oal_entry) *
			    (oal_entries - TX_DESC_PER_IOCB + 1)),
5598 			    DDI_DMA_SYNC_FORDEV);
5599 		}
5600 	} else { /* for USE_COPY mode, tx buffer has changed */
5601 		/* let device know the latest change */
5602 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5603 		/* copy buf offset */
5604 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5605 		    msg_len, DDI_DMA_SYNC_FORDEV);
5606 	}
5607 
5608 	/* save how the packet was sent */
5609 	tx_cb->tx_type = tx_mode;
5610 
5611 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
	/* reduce the number of available tx slots */
5613 	atomic_dec_32(&tx_ring->tx_free_count);
5614 
5615 	tx_ring->prod_idx++;
5616 	if (tx_ring->prod_idx >= tx_ring->wq_len)
5617 		tx_ring->prod_idx = 0;
5618 
5619 	now = ddi_get_lbolt();
5620 	qlge->last_tx_time = now;
5621 
5622 	return (DDI_SUCCESS);
5623 
5624 bad:
5625 	/*
5626 	 * if for any reason driver can not send, delete
5627 	 * the message pointer, mp
5628 	 */
5629 	now = ddi_get_lbolt();
5630 	freemsg(mp);
5631 	mp = NULL;
5632 	tx_cb->mp = NULL;
5633 	for (i = 0; i < j; i++)
5634 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5635 
5636 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5637 	    __func__, qlge->instance, (int)now));
5638 
5639 	return (DDI_SUCCESS);
5640 }
5641 
5642 
5643 /*
5644  * Initializes hardware and driver software flags before the driver
5645  * is finally ready to work.
5646  */
5647 int
5648 ql_do_start(qlge_t *qlge)
5649 {
5650 	int i;
5651 	struct rx_ring *rx_ring;
5652 	uint16_t lbq_buf_size;
5653 	int rings_done;
5654 
5655 	ASSERT(qlge != NULL);
5656 
5657 	mutex_enter(&qlge->hw_mutex);
5658 
5659 	/* Reset adapter */
5660 	(void) ql_asic_reset(qlge);
5661 
5662 	lbq_buf_size = (uint16_t)
5663 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5664 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5665 #ifdef QLGE_LOAD_UNLOAD
5666 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5667 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5668 #endif
5669 		/*
5670 		 * Check if any ring has buffers still with upper layers
5671 		 * If buffers are pending with upper layers, we use the
5672 		 * existing buffers and don't reallocate new ones
5673 		 * Unfortunately there is no way to evict buffers from
5674 		 * upper layers. Using buffers with the current size may
5675 		 * cause slightly sub-optimal performance, but that seems
5676 		 * to be the easiest way to handle this situation.
5677 		 */
5678 		rings_done = 0;
5679 		for (i = 0; i < qlge->rx_ring_count; i++) {
5680 			rx_ring = &qlge->rx_ring[i];
5681 			if (rx_ring->rx_indicate == 0)
5682 				rings_done++;
5683 			else
5684 				break;
5685 		}
5686 		/*
5687 		 * No buffers pending with upper layers;
5688 		 * reallocte them for new MTU size
5689 		 */
5690 		if (rings_done >= qlge->rx_ring_count) {
5691 			/* free large buffer pool */
5692 			for (i = 0; i < qlge->rx_ring_count; i++) {
5693 				rx_ring = &qlge->rx_ring[i];
5694 				if (rx_ring->type != TX_Q) {
5695 					ql_free_sbq_buffers(rx_ring);
5696 					ql_free_lbq_buffers(rx_ring);
5697 				}
5698 			}
5699 			/* reallocate large buffer pool */
5700 			for (i = 0; i < qlge->rx_ring_count; i++) {
5701 				rx_ring = &qlge->rx_ring[i];
5702 				if (rx_ring->type != TX_Q) {
5703 					(void) ql_alloc_sbufs(qlge, rx_ring);
5704 					(void) ql_alloc_lbufs(qlge, rx_ring);
5705 				}
5706 			}
5707 		}
5708 	}
5709 
5710 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5711 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5712 		mutex_exit(&qlge->hw_mutex);
5713 		if (qlge->fm_enable) {
5714 			atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5715 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5716 		}
5717 		return (DDI_FAILURE);
5718 	}
5719 
5720 	mutex_exit(&qlge->hw_mutex);
	/* if the adapter came up successfully after a previous error */
5722 	if (qlge->flags & ADAPTER_ERROR) {
5723 		atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5724 		if (qlge->fm_enable) {
5725 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5726 		}
5727 	}
5728 
5729 	/* Get current link state */
5730 	qlge->port_link_state = ql_get_link_state(qlge);
5731 
5732 	if (qlge->port_link_state == LS_UP) {
5733 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5734 		    __func__, qlge->instance));
5735 		/* If driver detects a carrier on */
5736 		CARRIER_ON(qlge);
5737 	} else {
5738 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5739 		    __func__, qlge->instance));
5740 		/* If driver detects a lack of carrier */
5741 		CARRIER_OFF(qlge);
5742 	}
5743 	qlge->mac_flags = QL_MAC_STARTED;
5744 	return (DDI_SUCCESS);
5745 }
5746 
5747 /*
5748  * Stop currently running driver
5749  * Driver needs to stop routing new packets to driver and wait until
5750  * all pending tx/rx buffers to be free-ed.
5751  */
5752 int
5753 ql_do_stop(qlge_t *qlge)
5754 {
5755 	int rc = DDI_FAILURE;
5756 	uint32_t i, j, k;
5757 	struct bq_desc *sbq_desc, *lbq_desc;
5758 	struct rx_ring *rx_ring;
5759 
5760 	ASSERT(qlge != NULL);
5761 
5762 	CARRIER_OFF(qlge);
5763 
	rc = ql_bringdown_adapter(qlge);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
	}
5769 
5770 	for (k = 0; k < qlge->rx_ring_count; k++) {
5771 		rx_ring = &qlge->rx_ring[k];
5772 		if (rx_ring->type != TX_Q) {
5773 			j = rx_ring->lbq_use_head;
5774 #ifdef QLGE_LOAD_UNLOAD
5775 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5776 			    " to free list %d\n total %d\n",
5777 			    k, rx_ring->lbuf_in_use_count,
5778 			    rx_ring->lbuf_free_count,
5779 			    rx_ring->lbuf_in_use_count +
5780 			    rx_ring->lbuf_free_count);
5781 #endif
5782 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5783 				lbq_desc = rx_ring->lbuf_in_use[j];
5784 				j++;
5785 				if (j >= rx_ring->lbq_len) {
5786 					j = 0;
5787 				}
5788 				if (lbq_desc->mp) {
5789 					atomic_inc_32(&rx_ring->rx_indicate);
5790 					freemsg(lbq_desc->mp);
5791 				}
5792 			}
5793 			rx_ring->lbq_use_head = j;
5794 			rx_ring->lbq_use_tail = j;
5795 			rx_ring->lbuf_in_use_count = 0;
5796 			j = rx_ring->sbq_use_head;
5797 #ifdef QLGE_LOAD_UNLOAD
5798 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5799 			    " to free list %d\n total %d \n",
5800 			    k, rx_ring->sbuf_in_use_count,
5801 			    rx_ring->sbuf_free_count,
5802 			    rx_ring->sbuf_in_use_count +
5803 			    rx_ring->sbuf_free_count);
5804 #endif
5805 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5806 				sbq_desc = rx_ring->sbuf_in_use[j];
5807 				j++;
5808 				if (j >= rx_ring->sbq_len) {
5809 					j = 0;
5810 				}
5811 				if (sbq_desc->mp) {
5812 					atomic_inc_32(&rx_ring->rx_indicate);
5813 					freemsg(sbq_desc->mp);
5814 				}
5815 			}
5816 			rx_ring->sbq_use_head = j;
5817 			rx_ring->sbq_use_tail = j;
5818 			rx_ring->sbuf_in_use_count = 0;
5819 		}
5820 	}
5821 
5822 	qlge->mac_flags = QL_MAC_STOPPED;
5823 
5824 	return (rc);
5825 }
5826 
5827 /*
5828  * Support
5829  */
5830 
5831 void
5832 ql_disable_isr(qlge_t *qlge)
5833 {
5834 	/*
5835 	 * disable the hardware interrupt
5836 	 */
5837 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5838 
5839 	qlge->flags &= ~INTERRUPTS_ENABLED;
5840 }
5841 
5842 
5843 
5844 /*
5845  * busy wait for 'usecs' microseconds.
5846  */
5847 void
5848 qlge_delay(clock_t usecs)
5849 {
5850 	drv_usecwait(usecs);
5851 }
5852 
5853 /*
5854  * retrieve firmware details.
5855  */
5856 
5857 pci_cfg_t *
5858 ql_get_pci_config(qlge_t *qlge)
5859 {
5860 	return (&(qlge->pci_cfg));
5861 }
5862 
5863 /*
5864  * Get current Link status
5865  */
5866 static uint32_t
5867 ql_get_link_state(qlge_t *qlge)
5868 {
5869 	uint32_t bitToCheck = 0;
5870 	uint32_t temp, linkState;
5871 
5872 	if (qlge->func_number == qlge->fn0_net) {
5873 		bitToCheck = STS_PL0;
5874 	} else {
5875 		bitToCheck = STS_PL1;
5876 	}
5877 	temp = ql_read_reg(qlge, REG_STATUS);
5878 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5879 	    __func__, qlge->instance, temp));
5880 
5881 	if (temp & bitToCheck) {
5882 		linkState = LS_UP;
5883 	} else {
5884 		linkState = LS_DOWN;
5885 	}
5886 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* the 8100 (Schultz) link is fixed at 10G, full duplex */
5888 		qlge->speed  = SPEED_10G;
5889 		qlge->duplex = 1;
5890 	}
5891 	return (linkState);
5892 }
5893 /*
5894  * Get current link status and report to OS
5895  */
5896 static void
5897 ql_get_and_report_link_state(qlge_t *qlge)
5898 {
5899 	uint32_t cur_link_state;
5900 
5901 	/* Get current link state */
5902 	cur_link_state = ql_get_link_state(qlge);
5903 	/* if link state has changed */
5904 	if (cur_link_state != qlge->port_link_state) {
5905 
5906 		qlge->port_link_state = cur_link_state;
5907 
5908 		if (qlge->port_link_state == LS_UP) {
5909 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5910 			    __func__, qlge->instance));
5911 			/* If driver detects a carrier on */
5912 			CARRIER_ON(qlge);
5913 		} else {
5914 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5915 			    __func__, qlge->instance));
5916 			/* If driver detects a lack of carrier */
5917 			CARRIER_OFF(qlge);
5918 		}
5919 	}
5920 }
5921 
5922 /*
5923  * timer callback function executed after timer expires
5924  */
5925 static void
5926 ql_timer(void* arg)
5927 {
5928 	ql_get_and_report_link_state((qlge_t *)arg);
5929 }
5930 
5931 /*
5932  * stop the running timer if activated
5933  */
5934 static void
5935 ql_stop_timer(qlge_t *qlge)
5936 {
5937 	timeout_id_t timer_id;
5938 	/* Disable driver timer */
5939 	if (qlge->ql_timer_timeout_id != NULL) {
5940 		timer_id = qlge->ql_timer_timeout_id;
5941 		qlge->ql_timer_timeout_id = NULL;
5942 		(void) untimeout(timer_id);
5943 	}
5944 }
5945 
5946 /*
5947  * stop then restart timer
5948  */
5949 void
5950 ql_restart_timer(qlge_t *qlge)
5951 {
5952 	ql_stop_timer(qlge);
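	/* poll the link state roughly every quarter of a second */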
5953 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5954 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5955 	    (void *)qlge, qlge->ql_timer_ticks);
5956 }
5957 
5958 /* ************************************************************************* */
5959 /*
5960  *		Hardware K-Stats Data Structures and Subroutines
5961  */
5962 /* ************************************************************************* */
5963 static const ql_ksindex_t ql_kstats_hw[] = {
5964 	/* PCI related hardware information */
5965 	{ 0, "Vendor Id"			},
5966 	{ 1, "Device Id"			},
5967 	{ 2, "Command"				},
5968 	{ 3, "Status"				},
5969 	{ 4, "Revision Id"			},
5970 	{ 5, "Cache Line Size"			},
5971 	{ 6, "Latency Timer"			},
5972 	{ 7, "Header Type"			},
5973 	{ 9, "I/O base addr"			},
5974 	{ 10, "Control Reg Base addr low"	},
5975 	{ 11, "Control Reg Base addr high"	},
5976 	{ 12, "Doorbell Reg Base addr low"	},
5977 	{ 13, "Doorbell Reg Base addr high"	},
5978 	{ 14, "Subsystem Vendor Id"		},
5979 	{ 15, "Subsystem Device ID"		},
5980 	{ 16, "PCIe Device Control"		},
5981 	{ 17, "PCIe Link Status"		},
5982 
5983 	{ -1,	NULL				},
5984 };
5985 
5986 /*
5987  * kstat update function for PCI registers
5988  */
5989 static int
5990 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5991 {
5992 	qlge_t *qlge;
5993 	kstat_named_t *knp;
5994 
5995 	if (flag != KSTAT_READ)
5996 		return (EACCES);
5997 
5998 	qlge = ksp->ks_private;
5999 	knp = ksp->ks_data;
6000 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
6001 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
6002 	(knp++)->value.ui32 = qlge->pci_cfg.command;
6003 	(knp++)->value.ui32 = qlge->pci_cfg.status;
6004 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
6005 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
6006 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
6007 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
6008 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
6009 	(knp++)->value.ui32 =
6010 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
6011 	(knp++)->value.ui32 =
6012 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
6013 	(knp++)->value.ui32 =
6014 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
6015 	(knp++)->value.ui32 =
6016 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
6017 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
6018 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
6019 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
6020 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
6021 
6022 	return (0);
6023 }
6024 
6025 static const ql_ksindex_t ql_kstats_mii[] = {
6026 	/* MAC/MII related hardware information */
6027 	{ 0, "mtu"},
6028 
6029 	{ -1, NULL},
6030 };
6031 
6032 
6033 /*
6034  * kstat update function for MII related information.
6035  */
6036 static int
6037 ql_kstats_mii_update(kstat_t *ksp, int flag)
6038 {
6039 	qlge_t *qlge;
6040 	kstat_named_t *knp;
6041 
6042 	if (flag != KSTAT_READ)
6043 		return (EACCES);
6044 
6045 	qlge = ksp->ks_private;
6046 	knp = ksp->ks_data;
6047 
6048 	(knp++)->value.ui32 = qlge->mtu;
6049 
6050 	return (0);
6051 }
6052 
6053 static const ql_ksindex_t ql_kstats_reg[] = {
6054 	/* Register information */
6055 	{ 0, "System (0x08)"			},
6056 	{ 1, "Reset/Fail Over(0x0Ch"		},
6057 	{ 2, "Function Specific Control(0x10)"	},
6058 	{ 3, "Status (0x30)"			},
6059 	{ 4, "Intr Enable (0x34)"		},
6060 	{ 5, "Intr Status1 (0x3C)"		},
6061 	{ 6, "Error Status (0x54)"		},
6062 	{ 7, "XGMAC Flow Control(0x11C)"	},
6063 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
6064 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
6065 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
6066 	{ 11, "interrupts actually allocated"	},
6067 	{ 12, "interrupts on rx ring 0"		},
6068 	{ 13, "interrupts on rx ring 1"		},
6069 	{ 14, "interrupts on rx ring 2"		},
6070 	{ 15, "interrupts on rx ring 3"		},
6071 	{ 16, "interrupts on rx ring 4"		},
6072 	{ 17, "interrupts on rx ring 5"		},
6073 	{ 18, "interrupts on rx ring 6"		},
6074 	{ 19, "interrupts on rx ring 7"		},
6075 	{ 20, "polls on rx ring 0"		},
6076 	{ 21, "polls on rx ring 1"		},
6077 	{ 22, "polls on rx ring 2"		},
6078 	{ 23, "polls on rx ring 3"		},
6079 	{ 24, "polls on rx ring 4"		},
6080 	{ 25, "polls on rx ring 5"		},
6081 	{ 26, "polls on rx ring 6"		},
6082 	{ 27, "polls on rx ring 7"		},
6083 	{ 28, "tx no resource on ring 0"	},
6084 	{ 29, "tx dma bind fail on ring 0"	},
6085 	{ 30, "tx dma no handle on ring 0"	},
6086 	{ 31, "tx dma no cookie on ring 0"	},
6087 	{ 32, "MPI firmware major version"	},
6088 	{ 33, "MPI firmware minor version"	},
6089 	{ 34, "MPI firmware sub version"	},
6090 	{ 35, "rx no resource"			},
6091 
6092 	{ -1, NULL},
6093 };
6094 
6095 
6096 /*
6097  * kstat update function for device register set
6098  */
6099 static int
6100 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6101 {
6102 	qlge_t *qlge;
6103 	kstat_named_t *knp;
6104 	uint32_t val32;
6105 	int i = 0;
6106 	struct tx_ring *tx_ring;
6107 	struct rx_ring *rx_ring;
6108 
6109 	if (flag != KSTAT_READ)
6110 		return (EACCES);
6111 
6112 	qlge = ksp->ks_private;
6113 	knp = ksp->ks_data;
6114 
6115 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6116 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6117 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6118 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6119 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6120 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6121 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6122 
6123 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6124 		return (0);
6125 	}
6126 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6127 	(knp++)->value.ui32 = val32;
6128 
6129 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6130 	(knp++)->value.ui32 = val32;
6131 
6132 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6133 	(knp++)->value.ui32 = val32;
6134 
6135 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6136 	(knp++)->value.ui32 = val32;
6137 
6138 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6139 
6140 	(knp++)->value.ui32 = qlge->intr_cnt;
6141 
6142 	for (i = 0; i < 8; i++) {
6143 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
6144 	}
6145 
6146 	for (i = 0; i < 8; i++) {
6147 		(knp++)->value.ui32 = qlge->rx_polls[i];
6148 	}
6149 
6150 	tx_ring = &qlge->tx_ring[0];
6151 	(knp++)->value.ui32 = tx_ring->defer;
6152 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6153 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6154 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6155 
6156 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
6157 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6158 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
6159 
	val32 = 0;
	for (i = 0; i < qlge->rx_ring_count; i++) {
6161 		rx_ring = &qlge->rx_ring[i];
6162 		val32 += rx_ring->rx_packets_dropped_no_buffer;
6163 	}
6164 	(knp++)->value.ui32 = val32;
6165 
6166 	return (0);
6167 }
6168 
6169 
6170 static kstat_t *
6171 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6172     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6173 {
6174 	kstat_t *ksp;
6175 	kstat_named_t *knp;
6176 	char *np;
6177 	int type;
6178 
6179 	size /= sizeof (ql_ksindex_t);
6180 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6181 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6182 	if (ksp == NULL)
6183 		return (NULL);
6184 
6185 	ksp->ks_private = qlge;
6186 	ksp->ks_update = update;
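	/*
	 * A leading '&' in a statistic name marks it as a character
	 * (string) statistic; all other statistics are 32-bit values.
	 */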
6187 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6188 		switch (*np) {
6189 		default:
6190 			type = KSTAT_DATA_UINT32;
6191 			break;
6192 		case '&':
6193 			np += 1;
6194 			type = KSTAT_DATA_CHAR;
6195 			break;
6196 		}
6197 		kstat_named_init(knp, np, (uint8_t)type);
6198 	}
6199 	kstat_install(ksp);
6200 
6201 	return (ksp);
6202 }
6203 
6204 /*
6205  * Setup various kstat
6206  */
6207 int
6208 ql_init_kstats(qlge_t *qlge)
6209 {
6210 	/* Hardware KStats */
6211 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6212 	    qlge->instance, "chip", ql_kstats_hw,
6213 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6214 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6215 		return (DDI_FAILURE);
6216 	}
6217 
6218 	/* MII KStats */
6219 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6220 	    qlge->instance, "mii", ql_kstats_mii,
6221 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
6222 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6223 		return (DDI_FAILURE);
6224 	}
6225 
6226 	/* REG KStats */
6227 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6228 	    qlge->instance, "reg", ql_kstats_reg,
6229 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6230 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6231 		return (DDI_FAILURE);
6232 	}
6233 	return (DDI_SUCCESS);
6234 }
6235 
6236 /*
6237  * delete all kstat
6238  */
6239 void
6240 ql_fini_kstats(qlge_t *qlge)
6241 {
6242 	int i;
6243 
6244 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
6245 		if (qlge->ql_kstats[i] != NULL)
6246 			kstat_delete(qlge->ql_kstats[i]);
6247 	}
6248 }
6249 
6250 /* ************************************************************************* */
6251 /*
6252  *                                 kstat end
6253  */
6254 /* ************************************************************************* */
6255 
6256 /*
6257  * Setup the parameters for receive and transmit rings including buffer sizes
6258  * and completion queue sizes
6259  */
6260 static int
6261 ql_setup_rings(qlge_t *qlge)
6262 {
6263 	uint8_t i;
6264 	struct rx_ring *rx_ring;
6265 	struct tx_ring *tx_ring;
6266 	uint16_t lbq_buf_size;
6267 
6268 	lbq_buf_size = (uint16_t)
6269 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6270 
6271 	/*
6272 	 * rx_ring[0] is always the default queue.
6273 	 */
6274 	/*
6275 	 * qlge->rx_ring_count:
6276 	 * Total number of rx_rings. This includes a number
6277 	 * of outbound completion handler rx_rings, and a
6278 	 * number of inbound completion handler rx_rings.
6279 	 * rss is only enabled if we have more than 1 rx completion
6280 	 * queue. If we have a single rx completion queue
6281 	 * then all rx completions go to this queue and
6282 	 * the last completion queue
6283 	 */
6284 
6285 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6286 
6287 	for (i = 0; i < qlge->tx_ring_count; i++) {
6288 		tx_ring = &qlge->tx_ring[i];
6289 		bzero((void *)tx_ring, sizeof (*tx_ring));
6290 		tx_ring->qlge = qlge;
6291 		tx_ring->wq_id = i;
6292 		tx_ring->wq_len = qlge->tx_ring_size;
6293 		tx_ring->wq_size = (uint32_t)(
6294 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6295 
6296 		/*
6297 		 * The completion queue ID for the tx rings start
6298 		 * immediately after the last rss completion queue.
6299 		 */
6300 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6301 	}
6302 
6303 	for (i = 0; i < qlge->rx_ring_count; i++) {
6304 		rx_ring = &qlge->rx_ring[i];
6305 		bzero((void *)rx_ring, sizeof (*rx_ring));
6306 		rx_ring->qlge = qlge;
6307 		rx_ring->cq_id = i;
6308 		if (i != 0)
6309 			rx_ring->cpu = (i) % qlge->rx_ring_count;
6310 		else
6311 			rx_ring->cpu = 0;
6312 
6313 		if (i < qlge->rss_ring_count) {
6314 			/*
6315 			 * Inbound completions (RSS) queues
6316 			 * Default queue is queue 0 which handles
6317 			 * unicast plus bcast/mcast and async events.
6318 			 * Other inbound queues handle unicast frames only.
6319 			 */
6320 			rx_ring->cq_len = qlge->rx_ring_size;
6321 			rx_ring->cq_size = (uint32_t)
6322 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6323 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6324 			rx_ring->lbq_size = (uint32_t)
6325 			    (rx_ring->lbq_len * sizeof (uint64_t));
6326 			rx_ring->lbq_buf_size = lbq_buf_size;
6327 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6328 			rx_ring->sbq_size = (uint32_t)
6329 			    (rx_ring->sbq_len * sizeof (uint64_t));
6330 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6331 			rx_ring->type = RX_Q;
6332 
6333 			QL_PRINT(DBG_GLD,
6334 			    ("%s(%d)Allocating rss completion queue %d "
6335 			    "on cpu %d\n", __func__, qlge->instance,
6336 			    rx_ring->cq_id, rx_ring->cpu));
6337 		} else {
6338 			/*
6339 			 * Outbound queue handles outbound completions only
6340 			 */
6341 			/* outbound cq is same size as tx_ring it services. */
6342 			QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6343 			rx_ring->cq_len = qlge->tx_ring_size;
6344 			rx_ring->cq_size = (uint32_t)
6345 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6346 			rx_ring->lbq_len = 0;
6347 			rx_ring->lbq_size = 0;
6348 			rx_ring->lbq_buf_size = 0;
6349 			rx_ring->sbq_len = 0;
6350 			rx_ring->sbq_size = 0;
6351 			rx_ring->sbq_buf_size = 0;
6352 			rx_ring->type = TX_Q;
6353 
6354 			QL_PRINT(DBG_GLD,
6355 			    ("%s(%d)Allocating TX completion queue %d on"
6356 			    " cpu %d\n", __func__, qlge->instance,
6357 			    rx_ring->cq_id, rx_ring->cpu));
6358 		}
6359 	}
6360 
6361 	return (DDI_SUCCESS);
6362 }
6363 
6364 static int
6365 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6366 {
6367 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6368 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6369 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6370 	/* first shadow area is used by wqicb's host copy of consumer index */
6371 	    + sizeof (uint64_t);
6372 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6373 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6374 	    + sizeof (uint64_t);
6375 	/* lrg/sml bufq pointers */
6376 	uint8_t *buf_q_base_reg =
6377 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6378 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6379 	uint64_t buf_q_base_reg_dma =
6380 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6381 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6382 	caddr_t doorbell_area =
6383 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
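	/* rx completion doorbells begin at page 128 of the doorbell region */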
6384 	int err = 0;
6385 	uint16_t bq_len;
6386 	uint64_t tmp;
6387 	uint64_t *base_indirect_ptr;
6388 	int page_entries;
6389 
6390 	/* Set up the shadow registers for this ring. */
6391 	rx_ring->prod_idx_sh_reg = shadow_reg;
6392 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6393 	rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6394 	    sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6395 
6396 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6397 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6398 
6399 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6400 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6401 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6402 
6403 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6404 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6405 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6406 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6407 
6408 	/* PCI doorbell mem area + 0x00 for consumer index register */
6409 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6410 	rx_ring->cnsmr_idx = 0;
6411 	*rx_ring->prod_idx_sh_reg = 0;
6412 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6413 
6414 	/* PCI doorbell mem area + 0x04 for valid register */
6415 	rx_ring->valid_db_reg = (uint32_t *)(void *)
6416 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6417 
6418 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
6419 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6420 	    ((uint8_t *)(void *)doorbell_area + 0x18);
6421 
6422 	/* PCI doorbell mem area + 0x1c */
6423 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6424 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
6425 
6426 	bzero((void *)cqicb, sizeof (*cqicb));
6427 
6428 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
6429 
6430 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6431 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6432 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6433 
6434 	cqicb->cq_base_addr_lo =
6435 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6436 	cqicb->cq_base_addr_hi =
6437 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6438 
6439 	cqicb->prod_idx_addr_lo =
6440 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6441 	cqicb->prod_idx_addr_hi =
6442 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6443 
6444 	/*
6445 	 * Set up the control block load flags.
6446 	 */
6447 	cqicb->flags = FLAGS_LC | /* Load queue base address */
6448 	    FLAGS_LV | /* Load MSI-X vector */
6449 	    FLAGS_LI;  /* Load irq delay values */
6450 	if (rx_ring->lbq_len) {
6451 		/* Load lbq values */
6452 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6453 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6454 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6455 		page_entries = 0;
6456 		do {
6457 			*base_indirect_ptr = cpu_to_le64(tmp);
6458 			tmp += VM_PAGE_SIZE;
6459 			base_indirect_ptr++;
6460 			page_entries++;
6461 		} while (page_entries < (int)(
6462 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6463 
6464 		cqicb->lbq_addr_lo =
6465 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6466 		cqicb->lbq_addr_hi =
6467 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6468 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6469 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6470 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6471 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6472 		    (uint16_t)rx_ring->lbq_len);
6473 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6474 		rx_ring->lbq_prod_idx = 0;
6475 		rx_ring->lbq_curr_idx = 0;
6476 	}
6477 	if (rx_ring->sbq_len) {
6478 		/* Load sbq values */
6479 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6480 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6481 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6482 		page_entries = 0;
6483 
6484 		do {
6485 			*base_indirect_ptr = cpu_to_le64(tmp);
6486 			tmp += VM_PAGE_SIZE;
6487 			base_indirect_ptr++;
6488 			page_entries++;
6489 		} while (page_entries < (uint32_t)
6490 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6491 
6492 		cqicb->sbq_addr_lo =
6493 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6494 		cqicb->sbq_addr_hi =
6495 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6496 		cqicb->sbq_buf_size = (uint16_t)
6497 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6498 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6499 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6500 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6501 		rx_ring->sbq_prod_idx = 0;
6502 		rx_ring->sbq_curr_idx = 0;
6503 	}
6504 	switch (rx_ring->type) {
6505 	case TX_Q:
6506 		cqicb->irq_delay = (uint16_t)
6507 		    cpu_to_le16(qlge->tx_coalesce_usecs);
6508 		cqicb->pkt_delay = (uint16_t)
6509 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
6510 		break;
6511 
6512 	case DEFAULT_Q:
6513 		cqicb->irq_delay = (uint16_t)
6514 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6515 		cqicb->pkt_delay = (uint16_t)
6516 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6517 		break;
6518 
6519 	case RX_Q:
6520 		/*
6521 		 * Inbound completion handling rx_rings run in
6522 		 * separate NAPI contexts.
6523 		 */
6524 		cqicb->irq_delay = (uint16_t)
6525 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6526 		cqicb->pkt_delay = (uint16_t)
6527 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6528 		break;
6529 	default:
6530 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6531 		    rx_ring->type);
6532 	}
6533 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6534 	    rx_ring->cq_id));
6535 	/* QL_DUMP_CQICB(qlge, cqicb); */
6536 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6537 	    rx_ring->cq_id);
6538 	if (err) {
6539 		cmn_err(CE_WARN, "Failed to load CQICB.");
6540 		return (err);
6541 	}
6542 
6543 	rx_ring->rx_packets_dropped_no_buffer = 0;
6544 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6545 	rx_ring->rx_failed_sbq_allocs = 0;
6546 	rx_ring->rx_failed_lbq_allocs = 0;
6547 	rx_ring->rx_packets = 0;
6548 	rx_ring->rx_bytes = 0;
6549 	rx_ring->frame_too_long = 0;
6550 	rx_ring->frame_too_short = 0;
6551 	rx_ring->fcs_err = 0;
6552 
6553 	return (err);
6554 }
6555 
6556 /*
6557  * start RSS
6558  */
6559 static int
6560 ql_start_rss(qlge_t *qlge)
6561 {
6562 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6563 	int status = 0;
6564 	int i;
6565 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6566 
6567 	bzero((void *)ricb, sizeof (*ricb));
6568 
6569 	ricb->base_cq = RSS_L4K;
6570 	ricb->flags =
6571 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6572 	    RSS_RT6);
6573 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6574 
6575 	/*
6576 	 * Fill out the Indirection Table.
6577 	 */
6578 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6579 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
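	/*
	 * Note: masking with (rss_ring_count - 1) spreads hash values
	 * evenly only when rss_ring_count is a power of two.
	 */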
6580 
6581 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6582 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6583 
6584 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6585 
6586 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6587 	if (status) {
6588 		cmn_err(CE_WARN, "Failed to load RICB.");
6589 		return (status);
6590 	}
6591 
6592 	return (status);
6593 }
6594 
6595 /*
6596  * load a tx ring control block to hw and start this ring
6597  */
6598 static int
6599 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6600 {
6601 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6602 	caddr_t doorbell_area =
6603 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6604 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6605 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6606 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6607 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
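	/*
	 * The first shadow area of this ring pair holds the wqicb's host
	 * copy of the tx consumer index (see ql_start_rx_ring above).
	 */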
6608 	int err = 0;
6609 
6610 	/*
6611 	 * Assign doorbell registers for this tx_ring.
6612 	 */
6613 
6614 	/* TX PCI doorbell mem area for tx producer index */
6615 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6616 	tx_ring->prod_idx = 0;
6617 	/* TX PCI doorbell mem area + 0x04 */
6618 	tx_ring->valid_db_reg = (uint32_t *)(void *)
6619 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6620 
6621 	/*
6622 	 * Assign shadow registers for this tx_ring.
6623 	 */
6624 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6625 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6626 	*tx_ring->cnsmr_idx_sh_reg = 0;
6627 
6628 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6629 	    " phys_addr 0x%lx\n",
6630 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6631 	    tx_ring->cnsmr_idx_sh_reg_dma));
6632 
6633 	wqicb->len =
6634 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6635 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6636 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6637 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6638 	wqicb->rid = 0;
6639 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6640 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6641 	wqicb->cnsmr_idx_addr_lo =
6642 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6643 	wqicb->cnsmr_idx_addr_hi =
6644 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6645 
6646 	ql_init_tx_ring(tx_ring);
6647 	/* QL_DUMP_WQICB(qlge, wqicb); */
6648 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6649 	    tx_ring->wq_id);
6650 
6651 	if (err) {
6652 		cmn_err(CE_WARN, "Failed to load WQICB.");
6653 		return (err);
6654 	}
6655 	return (err);
6656 }
6657 
6658 /*
6659  * Set up a MAC, multicast or VLAN address for the
6660  * inbound frame matching.
6661  */
6662 int
6663 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6664     uint16_t index)
6665 {
6666 	uint32_t offset = 0;
6667 	int status = DDI_SUCCESS;
6668 
6669 	switch (type) {
6670 	case MAC_ADDR_TYPE_MULTI_MAC:
6671 	case MAC_ADDR_TYPE_CAM_MAC: {
6672 		uint32_t cam_output;
6673 		uint32_t upper = (addr[0] << 8) | addr[1];
6674 		uint32_t lower =
6675 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6676 		    (addr[5]);
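		/*
		 * The CAM holds the 48-bit MAC address as a 16-bit upper
		 * half and a 32-bit lower half.
		 */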
6677 
6678 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6679 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6680 		    "MULTICAST" : "UNICAST"));
6681 		QL_PRINT(DBG_INIT,
6682 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6683 		    "the CAM.\n",
6684 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6685 		    addr[5], index));
6686 
6687 		status = ql_wait_reg_rdy(qlge,
6688 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6689 		if (status)
6690 			goto exit;
6691 		/* offset 0 - lower 32 bits of the MAC address */
6692 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6693 		    (offset++) |
6694 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6695 		    type);	/* type */
6696 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6697 		status = ql_wait_reg_rdy(qlge,
6698 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6699 		if (status)
6700 			goto exit;
6701 		/* offset 1 - upper 16 bits of the MAC address */
6702 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6703 		    (offset++) |
6704 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6705 		    type);	/* type */
6706 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6707 		status = ql_wait_reg_rdy(qlge,
6708 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6709 		if (status)
6710 			goto exit;
6711 		/* offset 2 - CQ ID associated with this MAC address */
6712 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6713 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6714 		    type);	/* type */
6715 		/*
6716 		 * This field should also include the queue id
6717 		 * and possibly the function id.  Right now we hardcode
6718 		 * the route field to NIC core.
6719 		 */
6720 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6721 			cam_output = (CAM_OUT_ROUTE_NIC |
6722 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6723 			    (0 <<
6724 			    CAM_OUT_CQ_ID_SHIFT));
6725 
6726 			/* route to NIC core */
6727 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6728 			    cam_output);
6729 			}
6730 		break;
6731 		}
6732 	default:
6733 		cmn_err(CE_WARN,
6734 		    "Address type %d not yet supported.", type);
6735 		status = DDI_FAILURE;
6736 	}
6737 exit:
6738 	return (status);
6739 }
6740 
6741 /*
6742  * The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send
 * broadcast, multicast and error frames to the default queue for slow
 * handling, and CAM hit/RSS frames to the fast handling queues.
6746  */
6747 static int
6748 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6749 {
6750 	int status;
6751 	uint32_t value = 0;
6752 
6753 	QL_PRINT(DBG_INIT,
6754 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6755 	    (enable ? "Adding" : "Removing"),
6756 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6757 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6758 	    ((index ==
6759 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6760 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6761 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6762 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6763 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6764 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6765 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6766 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6767 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6768 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6769 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6770 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6771 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6772 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6773 	    (enable ? "to" : "from")));
6774 
6775 	switch (mask) {
6776 	case RT_IDX_CAM_HIT:
6777 		value = RT_IDX_DST_CAM_Q | /* dest */
6778 		    RT_IDX_TYPE_NICQ | /* type */
6779 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6780 		break;
6781 
6782 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6783 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6784 		    RT_IDX_TYPE_NICQ |	/* type */
6785 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6786 		break;
6787 
6788 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6789 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6790 		    RT_IDX_TYPE_NICQ |	/* type */
6791 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6792 		break;
6793 
6794 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6795 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6796 		    RT_IDX_TYPE_NICQ |	/* type */
6797 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6798 		break;
6799 
6800 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6801 		value = RT_IDX_DST_CAM_Q |	/* dest */
6802 		    RT_IDX_TYPE_NICQ |	/* type */
6803 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6804 		break;
6805 
6806 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6807 		value = RT_IDX_DST_CAM_Q |	/* dest */
6808 		    RT_IDX_TYPE_NICQ |	/* type */
6809 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6810 		break;
6811 
6812 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6813 		value = RT_IDX_DST_RSS |	/* dest */
6814 		    RT_IDX_TYPE_NICQ |	/* type */
6815 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6816 		break;
6817 
6818 	case 0:	/* Clear the E-bit on an entry. */
6819 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6820 		    RT_IDX_TYPE_NICQ |	/* type */
6821 		    (index << RT_IDX_IDX_SHIFT); /* index */
6822 		break;
6823 
6824 	default:
6825 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6826 		    mask);
6827 		status = -EPERM;
6828 		goto exit;
6829 	}
6830 
6831 	if (value != 0) {
6832 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6833 		if (status)
6834 			goto exit;
6835 		value |= (enable ? RT_IDX_E : 0);
6836 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6837 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6838 	}
6839 
6840 exit:
6841 	return (status);
6842 }
6843 
6844 /*
6845  * Clear all the entries in the routing table.
6846  * Caller must get semaphore in advance.
6847  */
6848 
6849 static int
6850 ql_stop_routing(qlge_t *qlge)
6851 {
6852 	int status = 0;
6853 	int i;
6854 	/* Clear all the entries in the routing table. */
6855 	for (i = 0; i < 16; i++) {
6856 		status = ql_set_routing_reg(qlge, i, 0, 0);
6857 		if (status) {
6858 			cmn_err(CE_WARN, "Stop routing failed. ");
6859 		}
6860 	}
6861 	return (status);
6862 }
6863 
6864 /* Initialize the frame-to-queue routing. */
6865 static int
6866 ql_route_initialize(qlge_t *qlge)
6867 {
6868 	int status = 0;
6869 
6870 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6871 	if (status != DDI_SUCCESS)
6872 		return (status);
6873 
6874 	/* Clear all the entries in the routing table. */
6875 	status = ql_stop_routing(qlge);
6876 	if (status) {
6877 		goto exit;
6878 	}
6879 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6880 	if (status) {
6881 		cmn_err(CE_WARN,
6882 		    "Failed to init routing register for broadcast packets.");
6883 		goto exit;
6884 	}
6885 	/*
6886 	 * If we have more than one inbound queue, then turn on RSS in the
6887 	 * routing block.
6888 	 */
6889 	if (qlge->rss_ring_count > 1) {
6890 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6891 		    RT_IDX_RSS_MATCH, 1);
6892 		if (status) {
6893 			cmn_err(CE_WARN,
6894 			    "Failed to init routing register for MATCH RSS "
6895 			    "packets.");
6896 			goto exit;
6897 		}
6898 	}
6899 
6900 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6901 	    RT_IDX_CAM_HIT, 1);
6902 	if (status) {
6903 		cmn_err(CE_WARN,
6904 		    "Failed to init routing register for CAM packets.");
6905 		goto exit;
6906 	}
6907 
6908 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6909 	    RT_IDX_MCAST_MATCH, 1);
6910 	if (status) {
6911 		cmn_err(CE_WARN,
6912 		    "Failed to init routing register for Multicast "
6913 		    "packets.");
6914 	}
6915 
6916 exit:
6917 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6918 	return (status);
6919 }
6920 
6921 /*
6922  * Initialize hardware
6923  */
6924 static int
6925 ql_device_initialize(qlge_t *qlge)
6926 {
6927 	uint32_t value, mask;
6928 	int i;
6929 	int status = 0;
6930 	uint16_t pause = PAUSE_MODE_DISABLED;
6931 	boolean_t update_port_config = B_FALSE;
6932 	uint32_t pause_bit_mask;
6933 	boolean_t dcbx_enable = B_FALSE;
6934 	uint32_t dcbx_bit_mask = 0x10;
6935 	/*
6936 	 * Set up the System register to halt on errors.
6937 	 */
6938 	value = SYS_EFE | SYS_FAE;
6939 	mask = value << 16;
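	/* the upper 16 bits act as a write mask for the lower 16 bits */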
6940 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
6941 
6942 	/* Set the default queue. */
6943 	value = NIC_RCV_CFG_DFQ;
6944 	mask = NIC_RCV_CFG_DFQ_MASK;
6945 
6946 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6947 
6948 	/* Enable the MPI interrupt. */
6949 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6950 	    | INTR_MASK_PI);
6951 	/* Enable the function, set pagesize, enable error checking. */
6952 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6953 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6954 	/* Set/clear header splitting. */
6955 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6956 		value |= FSC_SH;
6957 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6958 	}
6959 	mask = FSC_VM_PAGESIZE_MASK |
6960 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6961 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6962 	/*
6963 	 * check current port max frame size, if different from OS setting,
6964 	 * then we need to change
6965 	 */
6966 	qlge->max_frame_size =
6967 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6968 
6969 	mutex_enter(&qlge->mbx_mutex);
6970 	status = ql_get_port_cfg(qlge);
6971 	mutex_exit(&qlge->mbx_mutex);
6972 
6973 	if (status == DDI_SUCCESS) {
6974 		/* if current frame size is smaller than required size */
6975 		if (qlge->port_cfg_info.max_frame_size <
6976 		    qlge->max_frame_size) {
6977 			QL_PRINT(DBG_MBX,
6978 			    ("update frame size, current %d, new %d\n",
6979 			    qlge->port_cfg_info.max_frame_size,
6980 			    qlge->max_frame_size));
6981 			qlge->port_cfg_info.max_frame_size =
6982 			    qlge->max_frame_size;
6983 			qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
6984 			update_port_config = B_TRUE;
6985 		}
6986 
6987 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
6988 			pause = PAUSE_MODE_STANDARD;
6989 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
6990 			pause = PAUSE_MODE_PER_PRIORITY;
6991 
6992 		if (pause != qlge->pause) {
6993 			pause_bit_mask = 0x60;	/* bit 5-6 */
6994 			/* clear pause bits */
6995 			qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
6996 			if (qlge->pause == PAUSE_MODE_STANDARD)
6997 				qlge->port_cfg_info.link_cfg |= STD_PAUSE;
6998 			else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
6999 				qlge->port_cfg_info.link_cfg |= PP_PAUSE;
7000 			update_port_config = B_TRUE;
7001 		}
7002 
7003 		if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
7004 			dcbx_enable = B_TRUE;
		if (dcbx_enable != qlge->dcbx_enable) {
			qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
			if (qlge->dcbx_enable)
				qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
			update_port_config = B_TRUE;
		}
7012 
7013 		/* if need to update port configuration */
7014 		if (update_port_config) {
7015 			mutex_enter(&qlge->mbx_mutex);
7016 			(void) ql_set_mpi_port_config(qlge,
7017 			    qlge->port_cfg_info);
7018 			mutex_exit(&qlge->mbx_mutex);
7019 		}
7020 	} else
7021 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
7022 
7023 	/* Start up the rx queues. */
7024 	for (i = 0; i < qlge->rx_ring_count; i++) {
7025 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
7026 		if (status) {
7027 			cmn_err(CE_WARN,
7028 			    "Failed to start rx ring[%d]", i);
7029 			return (status);
7030 		}
7031 	}
7032 
7033 	/*
7034 	 * If there is more than one inbound completion queue
7035 	 * then download a RICB to configure RSS.
7036 	 */
7037 	if (qlge->rss_ring_count > 1) {
7038 		status = ql_start_rss(qlge);
7039 		if (status) {
7040 			cmn_err(CE_WARN, "Failed to start RSS.");
7041 			return (status);
7042 		}
7043 	}
7044 
7045 	/* Start up the tx queues. */
7046 	for (i = 0; i < qlge->tx_ring_count; i++) {
7047 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
7048 		if (status) {
7049 			cmn_err(CE_WARN,
7050 			    "Failed to start tx ring[%d]", i);
7051 			return (status);
7052 		}
7053 	}
7054 	qlge->selected_tx_ring = 0;
7055 	/* Set the frame routing filter. */
7056 	status = ql_route_initialize(qlge);
7057 	if (status) {
7058 		cmn_err(CE_WARN,
7059 		    "Failed to init CAM/Routing tables.");
7060 		return (status);
7061 	}
7062 
7063 	return (status);
7064 }
7065 /*
7066  * Issue soft reset to chip.
7067  */
7068 static int
7069 ql_asic_reset(qlge_t *qlge)
7070 {
7071 	int status = DDI_SUCCESS;
7072 
7073 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
7074 	    |FUNCTION_RESET);
7075 
7076 	if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
7077 	    BIT_RESET, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "Timed out waiting for the chip reset to complete!");
7080 		status = DDI_FAILURE;
7081 	}
7082 
7083 	return (status);
7084 }
7085 
7086 /*
7087  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7088  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7089  * to be used by hardware.
7090  */
7091 static void
7092 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7093 {
7094 	struct bq_desc *sbq_desc;
7095 	int i;
7096 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7097 	uint32_t arm_count;
7098 
7099 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7100 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7101 	else {
7102 		/* Adjust to a multiple of 16 */
7103 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7104 #ifdef QLGE_LOAD_UNLOAD
7105 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7106 #endif
7107 	}
7108 	for (i = 0; i < arm_count; i++) {
7109 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7110 		if (sbq_desc == NULL)
7111 			break;
7112 		/* Arm asic */
7113 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7114 		sbq_entry++;
7115 
7116 		/* link the descriptors to in_use_list */
7117 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7118 		rx_ring->sbq_prod_idx++;
7119 	}
7120 	ql_update_sbq_prod_idx(qlge, rx_ring);
7121 }
7122 
7123 /*
7124  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7125  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7126  * to be used by hardware.
7127  */
7128 static void
7129 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7130 {
7131 	struct bq_desc *lbq_desc;
7132 	int i;
7133 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7134 	uint32_t arm_count;
7135 
7136 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7137 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7138 	else {
7139 		/* Adjust to a multiple of 16 */
7140 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7141 #ifdef QLGE_LOAD_UNLOAD
7142 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7143 #endif
7144 	}
7145 	for (i = 0; i < arm_count; i++) {
7146 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7147 		if (lbq_desc == NULL)
7148 			break;
7149 		/* Arm asic */
7150 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7151 		lbq_entry++;
7152 
7153 		/* link the descriptors to in_use_list */
7154 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7155 		rx_ring->lbq_prod_idx++;
7156 	}
7157 	ql_update_lbq_prod_idx(qlge, rx_ring);
7158 }
7159 
7160 
7161 /*
7162  * Initializes the adapter by configuring request and response queues,
7163  * allocates and ARMs small and large receive buffers to the
7164  * hardware
7165  */
7166 static int
7167 ql_bringup_adapter(qlge_t *qlge)
7168 {
7169 	int i;
7170 
7171 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
7172 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
7173 		    __func__, qlge->instance);
7174 		goto err_bringup;
7175 	}
7176 	qlge->sequence |= INIT_ADAPTER_UP;
7177 
7178 #ifdef QLGE_TRACK_BUFFER_USAGE
7179 	for (i = 0; i < qlge->rx_ring_count; i++) {
7180 		if (qlge->rx_ring[i].type != TX_Q) {
7181 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
7182 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
7183 		}
7184 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
7185 	}
7186 #endif
7187 	/* Arm buffers */
7188 	for (i = 0; i < qlge->rx_ring_count; i++) {
7189 		if (qlge->rx_ring[i].type != TX_Q) {
7190 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
7191 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
7192 		}
7193 	}
7194 
7195 	/* Enable work/request queues */
7196 	for (i = 0; i < qlge->tx_ring_count; i++) {
7197 		if (qlge->tx_ring[i].valid_db_reg)
7198 			ql_write_doorbell_reg(qlge,
7199 			    qlge->tx_ring[i].valid_db_reg,
7200 			    REQ_Q_VALID);
7201 	}
7202 
7203 	/* Enable completion queues */
7204 	for (i = 0; i < qlge->rx_ring_count; i++) {
7205 		if (qlge->rx_ring[i].valid_db_reg)
7206 			ql_write_doorbell_reg(qlge,
7207 			    qlge->rx_ring[i].valid_db_reg,
7208 			    RSP_Q_VALID);
7209 	}
7210 
7211 	for (i = 0; i < qlge->tx_ring_count; i++) {
7212 		mutex_enter(&qlge->tx_ring[i].tx_lock);
7213 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
7214 		mutex_exit(&qlge->tx_ring[i].tx_lock);
7215 	}
7216 
7217 	for (i = 0; i < qlge->rx_ring_count; i++) {
7218 		mutex_enter(&qlge->rx_ring[i].rx_lock);
7219 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
7220 		mutex_exit(&qlge->rx_ring[i].rx_lock);
7221 	}
7222 
7223 	/* hw_mutex is re-acquired after completion interrupts are enabled */
7224 	mutex_exit(&qlge->hw_mutex);
7225 	/* Traffic can start flowing now */
7226 	ql_enable_all_completion_interrupts(qlge);
7227 	mutex_enter(&qlge->hw_mutex);
7228 
7229 	ql_enable_global_interrupt(qlge);
7230 
7231 	qlge->sequence |= ADAPTER_INIT;
7232 	return (DDI_SUCCESS);
7233 
7234 err_bringup:
7235 	(void) ql_asic_reset(qlge);
7236 	return (DDI_FAILURE);
7237 }
7238 
7239 /*
7240  * Initialize the mutexes of each rx/tx ring
7241  */
7242 static int
7243 ql_init_rx_tx_locks(qlge_t *qlge)
7244 {
7245 	struct tx_ring *tx_ring;
7246 	struct rx_ring *rx_ring;
7247 	int i;
7248 
7249 	for (i = 0; i < qlge->tx_ring_count; i++) {
7250 		tx_ring = &qlge->tx_ring[i];
7251 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7252 		    DDI_INTR_PRI(qlge->intr_pri));
7253 	}
7254 
7255 	for (i = 0; i < qlge->rx_ring_count; i++) {
7256 		rx_ring = &qlge->rx_ring[i];
7257 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7258 		    DDI_INTR_PRI(qlge->intr_pri));
7259 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7260 		    DDI_INTR_PRI(qlge->intr_pri));
7261 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7262 		    DDI_INTR_PRI(qlge->intr_pri));
7263 	}
7264 
7265 	return (DDI_SUCCESS);
7266 }
7267 
7268 /*ARGSUSED*/
7269 /*
7270  * Simply call pci_ereport_post which generates ereports for errors
7271  * that occur in the PCI local bus configuration status registers.
7272  */
7273 static int
7274 ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7275 {
7276 	pci_ereport_post(dip, err, NULL);
7277 	return (err->fme_status);
7278 }
7279 
7280 static void
7281 ql_fm_init(qlge_t *qlge)
7282 {
7283 	ddi_iblock_cookie_t iblk;
7284 
7285 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7286 	    qlge->instance, qlge->fm_capabilities));
7287 	/*
7288 	 * Register capabilities with IO Fault Services. The capabilities
7289 	 * set above may not be supported by the parent nexus, in which case
7290 	 * some capability bits may be cleared.
7291 	 */
7292 	if (qlge->fm_capabilities)
7293 		ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7294 
7295 	/*
7296 	 * Initialize pci ereport capabilities if ereport capable
7297 	 */
7298 	if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7299 	    DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7300 		pci_ereport_setup(qlge->dip);
7301 	}
7302 
7303 	/* Register error callback if error callback capable */
7304 	if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7305 		ddi_fm_handler_register(qlge->dip,
7306 		    ql_fm_error_cb, (void*) qlge);
7307 	}
7308 
7309 	/*
7310 	 * DDI_FLAGERR_ACC indicates:
7311 	 *  Driver will check its access handle(s) for faults on
7312 	 *   a regular basis by calling ddi_fm_acc_err_get
7313 	 *  Driver is able to cope with incorrect results of I/O
7314 	 *   operations resulting from an I/O fault
7315 	 */
7316 	if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7317 		ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7318 	}
7319 
7320 	/*
7321 	 * DDI_DMA_FLAGERR indicates:
7322 	 *  Driver will check its DMA handle(s) for faults on a
7323 	 *   regular basis using ddi_fm_dma_err_get
7324 	 *  Driver is able to cope with incorrect results of DMA
7325 	 *   operations resulting from an I/O fault
7326 	 */
7327 	if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7328 		tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7329 		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7330 	}
7331 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7332 	    qlge->instance));
7333 }
7334 
7335 static void
7336 ql_fm_fini(qlge_t *qlge)
7337 {
7338 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7339 	    qlge->instance));
7340 	/* Only unregister FMA capabilities if we registered some */
7341 	if (qlge->fm_capabilities) {
7342 
7343 		/*
7344 		 * Release any resources allocated by pci_ereport_setup()
7345 		 */
7346 		if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7347 		    DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7348 			pci_ereport_teardown(qlge->dip);
7349 
7350 		/*
7351 		 * Un-register error callback if error callback capable
7352 		 */
7353 		if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7354 			ddi_fm_handler_unregister(qlge->dip);
7355 
7356 		/* Unregister from IO Fault Services */
7357 		ddi_fm_fini(qlge->dip);
7358 	}
7359 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7360 	    qlge->instance));
7361 }
7362 /*
7363  * ql_attach - Driver attach.
7364  */
7365 static int
7366 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7367 {
7368 	int instance;
7369 	qlge_t *qlge = NULL;
7370 	int rval;
7371 	uint16_t w;
7372 	mac_register_t *macp = NULL;
7373 	uint32_t data;
7374 
7375 	rval = DDI_FAILURE;
7376 
7377 	/* first get the instance */
7378 	instance = ddi_get_instance(dip);
7379 
7380 	switch (cmd) {
7381 	case DDI_ATTACH:
7382 		/*
7383 		 * Allocate our per-device-instance structure
7384 		 */
7385 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7386 		ASSERT(qlge != NULL);
7387 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7388 
7389 		qlge->dip = dip;
7390 		qlge->instance = instance;
7391 		/* Debug printing is disabled unless QL_DEBUG is set. */
7392 		qlge->ql_dbgprnt = 0;
7393 #if QL_DEBUG
7394 		qlge->ql_dbgprnt = QL_DEBUG;
7395 #endif /* QL_DEBUG */
7396 
7397 		/*
7398 		 * Initialize for FMA (Fault Management Architecture) support:
7399 		 * set the default fault management capabilities.
7400 		 */
7401 		qlge->fm_capabilities =
7402 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
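		/* allow the "fm-capable" property to override the defaults */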
7403 		data = ql_get_prop(qlge, "fm-capable");
7404 		if (data <= 0xf) {
7405 			qlge->fm_capabilities = data;
7406 		}
7407 		ql_fm_init(qlge);
7408 		qlge->sequence |= INIT_FM;
7409 		QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7410 		    qlge->instance));
7411 
7412 		/*
7413 		 * Setup the ISP8x00 registers address mapping to be
7414 		 * accessed by this particular driver.
7415 		 * 0x0   Configuration Space
7416 		 * 0x1   I/O Space
7417 		 * 0x2   1st Memory Space address - Control Register Set
7418 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
7419 		 */
7420 		w = 2;
7421 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7422 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
7423 		    &qlge->dev_handle) != DDI_SUCCESS) {
7424 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
7425 			    "registers", ADAPTER_NAME, instance);
7426 			break;
7427 		}
7428 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7429 		    qlge->iobase));
7430 		qlge->sequence |= INIT_REGS_SETUP;
7431 
7432 		/* map Doorbell memory space */
7433 		w = 3;
7434 		if (ddi_regs_map_setup(dip, w,
7435 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7436 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
7437 		    &ql_dev_acc_attr,
7438 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7439 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7440 			    "registers",
7441 			    ADAPTER_NAME, instance);
7442 			break;
7443 		}
7444 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7445 		    qlge->doorbell_reg_iobase));
7446 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7447 
7448 		/*
7449 		 * Allocate a macinfo structure for this instance
7450 		 */
7451 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7452 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7453 			    __func__, instance);
7454 			break;
7455 		}
7456 		/* save the adapter state pointer in the dip's private data */
7457 		ddi_set_driver_private(dip, qlge);
7458 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7459 		    ADAPTER_NAME, instance));
7460 		qlge->sequence |= INIT_MAC_ALLOC;
7461 
7462 		/*
7463 		 * Attach this instance of the device
7464 		 */
7465 		/* Set up the PCI local bus configuration resource. */
7466 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7467 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7468 			    ADAPTER_NAME, instance);
7469 			if (qlge->fm_enable) {
7470 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7471 				ddi_fm_service_impact(qlge->dip,
7472 				    DDI_SERVICE_LOST);
7473 			}
7474 			break;
7475 		}
7476 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7477 		QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7478 		    instance));
7479 
7480 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
7481 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7482 			    "instance", ADAPTER_NAME, instance);
7483 			if (qlge->fm_enable) {
7484 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7485 				ddi_fm_service_impact(qlge->dip,
7486 				    DDI_SERVICE_LOST);
7487 			}
7488 			break;
7489 		}
7490 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7491 		    instance));
7492 
7493 		/* Setup interrupt vectors */
7494 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7495 			break;
7496 		}
7497 		qlge->sequence |= INIT_INTR_ALLOC;
7498 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7499 		    instance));
7500 
7501 		/* Configure queues */
7502 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7503 			break;
7504 		}
7505 		qlge->sequence |= INIT_SETUP_RINGS;
7506 		QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7507 		    instance));
7508 
7509 		/*
7510 		 * Allocate memory resources
7511 		 */
7512 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7513 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7514 			    __func__, qlge->instance);
7515 			break;
7516 		}
7517 		qlge->sequence |= INIT_MEMORY_ALLOC;
7518 		QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7519 		    instance));
7520 
7521 		/*
7522 		 * Map queues to interrupt vectors
7523 		 */
7524 		ql_resolve_queues_to_irqs(qlge);
7525 
7526 		/* Initialize mutex, need the interrupt priority */
7527 		(void) ql_init_rx_tx_locks(qlge);
7528 		qlge->sequence |= INIT_LOCKS_CREATED;
7529 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7530 		    ADAPTER_NAME, instance));
7531 
7532 		/*
7533 		 * Use soft interrupts for work that should not run in the
7534 		 * regular network entry points or while mutexes are held
7535 		 */
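		/* soft interrupt to defer MPI event processing */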
7536 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7537 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7538 		    != DDI_SUCCESS) {
7539 			break;
7540 		}
7541 
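		/* soft interrupt to defer an ASIC reset */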
7542 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7543 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7544 		    != DDI_SUCCESS) {
7545 			break;
7546 		}
7547 
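		/* soft interrupt to defer an MPI reset */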
7548 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7549 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7550 		    != DDI_SUCCESS) {
7551 			break;
7552 		}
7553 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7554 		QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7555 		    ADAPTER_NAME, instance));
7556 
7557 		/*
7558 		 * Mutexes that protect the adapter state structure;
7559 		 * initialize them at the interrupt priority.
7560 		 */
7561 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7562 		    DDI_INTR_PRI(qlge->intr_pri));
7563 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7564 		    DDI_INTR_PRI(qlge->intr_pri));
7565 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7566 		    DDI_INTR_PRI(qlge->intr_pri));
7567 
7568 		/* Mailbox wait and interrupt condition variable. */
7569 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7570 		qlge->sequence |= INIT_MUTEX;
7571 		QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7572 		    ADAPTER_NAME, instance));
7573 
7574 		/*
7575 		 * KStats
7576 		 */
7577 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
7578 			cmn_err(CE_WARN, "%s(%d): kstat initialization failed",
7579 			    ADAPTER_NAME, instance);
7580 			break;
7581 		}
7582 		qlge->sequence |= INIT_KSTATS;
7583 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7584 		    ADAPTER_NAME, instance));
7585 
7586 		/*
7587 		 * Initialize the GLDv3 macinfo structure
7588 		 */
7589 		ql_gld3_init(qlge, macp);
7590 		/*
7591 		 * Add interrupt handlers
7592 		 */
7593 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7594 			cmn_err(CE_WARN, "Failed to add interrupt "
7595 			    "handlers");
7596 			break;
7597 		}
7598 		qlge->sequence |= INIT_ADD_INTERRUPT;
7599 		QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7600 		    ADAPTER_NAME, instance));
7601 
7602 		/*
7603 		 * MAC Register
7604 		 */
7605 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7606 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
7607 			    __func__, instance);
7608 			break;
7609 		}
7610 		qlge->sequence |= INIT_MAC_REGISTERED;
7611 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7612 		    ADAPTER_NAME, instance));
7613 
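		/* macinfo is no longer needed once registration succeeds */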
7614 		mac_free(macp);
7615 		macp = NULL;
7616 
7617 		qlge->mac_flags = QL_MAC_ATTACHED;
7618 
7619 		ddi_report_dev(dip);
7620 
7621 		rval = DDI_SUCCESS;
7622 
7623 		break;
7624 /*
7625  * DDI_RESUME
7626  * When called with cmd set to DDI_RESUME, attach() must restore
7627  * the hardware state of a device (power may have been removed
7628  * from the device), allow pending requests to continue, and
7629  * service new requests. In this case, the driver must not make
7630  * any assumptions about the state of the hardware, but must
7631  * restore the state of the device except for the power level
7632  * of components.
7633  *
7634  */
7635 	case DDI_RESUME:
7636 
7637 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7638 			return (DDI_FAILURE);
7639 
7640 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7641 		    __func__, qlge->instance));
7642 
7643 		mutex_enter(&qlge->gen_mutex);
7644 		rval = ql_do_start(qlge);
7645 		mutex_exit(&qlge->gen_mutex);
7646 		break;
7647 
7648 	default:
7649 		break;
7650 	}
7651 
7652 	/* if failed to attach */
7653 	if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7654 		cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7655 		    qlge->sequence);
7656 		ql_free_resources(qlge);
7657 	}
7658 
7659 	return (rval);
7660 }
7661 
7662 /*
7663  * Unbind all pending tx dma handles during driver bring down
7664  */
7665 static void
7666 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7667 {
7668 	struct tx_ring_desc *tx_ring_desc;
7669 	int i, j;
7670 
7671 	if (tx_ring->wq_desc) {
7672 		tx_ring_desc = tx_ring->wq_desc;
7673 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7674 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7675 				if (tx_ring_desc->tx_dma_handle[j]) {
7676 					(void) ddi_dma_unbind_handle(
7677 					    tx_ring_desc->tx_dma_handle[j]);
7678 				}
7679 			}
7680 			tx_ring_desc->tx_dma_handle_used = 0;
7681 		} /* end of for loop */
7682 	}
7683 }
7684 /*
7685  * Wait for all the packets sent to the chip to finish transmission, to
7686  * prevent buffers from being unmapped before or during a transmit operation
7687  */
7688 static int
7689 ql_wait_tx_quiesce(qlge_t *qlge)
7690 {
7691 	int count = MAX_TX_WAIT_COUNT, i;
7692 	int rings_done;
7693 	volatile struct tx_ring *tx_ring;
7694 	uint32_t consumer_idx;
7695 	uint32_t producer_idx;
7696 	uint32_t temp;
7697 	int done = 0;
7698 	int rval = DDI_FAILURE;
7699 
7700 	while (!done) {
7701 		rings_done = 0;
7702 
7703 		for (i = 0; i < qlge->tx_ring_count; i++) {
7704 			tx_ring = &qlge->tx_ring[i];
7705 			temp = ql_read_doorbell_reg(qlge,
7706 			    tx_ring->prod_idx_db_reg);
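			/* low 16 bits: producer index; high 16: consumer */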
7707 			producer_idx = temp & 0x0000ffff;
7708 			consumer_idx = (temp >> 16);
7709 
7710 			if (qlge->isr_stride) {
7711 				struct rx_ring *ob_ring;
7712 				ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7713 				if (producer_idx != ob_ring->cnsmr_idx) {
7714 					cmn_err(CE_NOTE, " force clean \n");
7715 					(void) ql_clean_outbound_rx_ring(
7716 					    ob_ring);
7717 				}
7718 			}
7719 			/*
7720 			 * Count the pending IOCBs, i.e. those that have not
7721 			 * yet been pulled down by the chip
7722 			 */
7723 			if (producer_idx >= consumer_idx)
7724 				temp = (producer_idx - consumer_idx);
7725 			else
7726 				temp = (tx_ring->wq_len - consumer_idx) +
7727 				    producer_idx;
7728 
7729 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7730 				rings_done++;
7731 			else {
7732 				/* this ring is still draining; keep waiting */
7733 				break;
7734 			}
7735 		}
7736 
7737 		/* If all the rings are done */
7738 		if (rings_done >= qlge->tx_ring_count) {
7739 #ifdef QLGE_LOAD_UNLOAD
7740 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7741 			    __func__, qlge->instance);
7742 #endif
7743 			rval = DDI_SUCCESS;
7744 			break;
7745 		}
7746 
7747 		qlge_delay(100);
7748 
7749 		count--;
7750 		if (!count) {
7751 
7752 			count = MAX_TX_WAIT_COUNT;
7753 #ifdef QLGE_LOAD_UNLOAD
7754 			volatile struct rx_ring *rx_ring;
7755 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
7756 			    " transmits on queue %d to complete.\n",
7757 			    __func__, qlge->instance,
7758 			    (qlge->tx_ring[i].wq_len -
7759 			    qlge->tx_ring[i].tx_free_count),
7760 			    i);
7761 
7762 			rx_ring = &qlge->rx_ring[i+1];
7763 			temp = ql_read_doorbell_reg(qlge,
7764 			    rx_ring->cnsmr_idx_db_reg);
7765 			consumer_idx = temp & 0x0000ffff;
7766 			producer_idx = (temp >> 16);
7767 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7768 			    " Producer %d, Consumer %d\n",
7769 			    __func__, qlge->instance,
7770 			    i+1,
7771 			    producer_idx, consumer_idx);
7772 
7773 			temp = ql_read_doorbell_reg(qlge,
7774 			    tx_ring->prod_idx_db_reg);
7775 			producer_idx = temp & 0x0000ffff;
7776 			consumer_idx = (temp >> 16);
7777 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7778 			    " Producer %d, Consumer %d\n",
7779 			    __func__, qlge->instance, i,
7780 			    producer_idx, consumer_idx);
7781 #endif
7782 
7783 			/* For now move on */
7784 			break;
7785 		}
7786 	}
7787 	/* Stop the request queues */
7788 	mutex_enter(&qlge->hw_mutex);
7789 	for (i = 0; i < qlge->tx_ring_count; i++) {
7790 		if (qlge->tx_ring[i].valid_db_reg) {
7791 			ql_write_doorbell_reg(qlge,
7792 			    qlge->tx_ring[i].valid_db_reg, 0);
7793 		}
7794 	}
7795 	mutex_exit(&qlge->hw_mutex);
7796 	return (rval);
7797 }
7798 
7799 /*
7800  * Wait for all the receive buffers indicated to the stack to come back
7801  */
7802 static int
7803 ql_wait_rx_complete(qlge_t *qlge)
7804 {
7805 	int i;
7806 	/* Disable all the completion queues */
7807 	mutex_enter(&qlge->hw_mutex);
7808 	for (i = 0; i < qlge->rx_ring_count; i++) {
7809 		if (qlge->rx_ring[i].valid_db_reg) {
7810 			ql_write_doorbell_reg(qlge,
7811 			    qlge->rx_ring[i].valid_db_reg, 0);
7812 		}
7813 	}
7814 	mutex_exit(&qlge->hw_mutex);
7815 
7816 	/* Wait for OS to return all rx buffers */
7817 	qlge_delay(QL_ONE_SEC_DELAY);
7818 	return (DDI_SUCCESS);
7819 }
7820 
7821 /*
7822  * stop the driver
7823  */
7824 static int
7825 ql_bringdown_adapter(qlge_t *qlge)
7826 {
7827 	int i;
7828 	int status = DDI_SUCCESS;
7829 
7830 	qlge->mac_flags = QL_MAC_BRINGDOWN;
7831 	if (qlge->sequence & ADAPTER_INIT) {
7832 		/* stop forwarding external packets to driver */
7833 		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7834 		if (status)
7835 			return (status);
7836 		(void) ql_stop_routing(qlge);
7837 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7838 		/*
7839 		 * Set the flag for receive and transmit
7840 		 * operations to cease
7841 		 */
7842 		for (i = 0; i < qlge->tx_ring_count; i++) {
7843 			mutex_enter(&qlge->tx_ring[i].tx_lock);
7844 			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7845 			mutex_exit(&qlge->tx_ring[i].tx_lock);
7846 		}
7847 
7848 		for (i = 0; i < qlge->rx_ring_count; i++) {
7849 			mutex_enter(&qlge->rx_ring[i].rx_lock);
7850 			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7851 			mutex_exit(&qlge->rx_ring[i].rx_lock);
7852 		}
7853 
7854 		/*
7855 		 * Need interrupts to be running while the transmit
7856 		 * completions are cleared. Wait for the packets
7857 		 * queued to the chip to be sent out
7858 		 */
7859 		(void) ql_wait_tx_quiesce(qlge);
7860 		/* Interrupts not needed from now */
7861 		ql_disable_all_completion_interrupts(qlge);
7862 
7863 		mutex_enter(&qlge->hw_mutex);
7864 		/* Disable Global interrupt */
7865 		ql_disable_global_interrupt(qlge);
7866 		mutex_exit(&qlge->hw_mutex);
7867 
7868 		/* Wait for all the indicated packets to come back */
7869 		status = ql_wait_rx_complete(qlge);
7870 
7871 		mutex_enter(&qlge->hw_mutex);
7872 		/* Reset adapter */
7873 		(void) ql_asic_reset(qlge);
7874 		/*
7875 		 * Unbind all tx dma handles to prevent pending tx descriptors'
7876 		 * dma handles from being re-used.
7877 		 */
7878 		for (i = 0; i < qlge->tx_ring_count; i++) {
7879 			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7880 		}
7881 
7882 		qlge->sequence &= ~ADAPTER_INIT;
7883 
7884 		mutex_exit(&qlge->hw_mutex);
7885 	}
7886 	return (status);
7887 }
7888 
7889 /*
7890  * ql_detach
7891  * Used to remove all the state associated with a given
7892  * instance of a device node prior to the removal of that
7893  * instance from the system.
7894  */
7895 static int
7896 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7897 {
7898 	qlge_t *qlge;
7899 	int rval;
7900 
7901 	rval = DDI_SUCCESS;
7902 
7903 	switch (cmd) {
7904 	case DDI_DETACH:
7905 
7906 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7907 			return (DDI_FAILURE);
7908 		rval = ql_bringdown_adapter(qlge);
7909 		if (rval != DDI_SUCCESS)
7910 			break;
7911 
7912 		qlge->mac_flags = QL_MAC_DETACH;
7913 
7914 		/* free memory resources */
7915 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7916 			ql_free_mem_resources(qlge);
7917 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7918 		}
7919 		ql_free_resources(qlge);
7920 
7921 		break;
7922 
7923 	case DDI_SUSPEND:
7924 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7925 			return (DDI_FAILURE);
7926 
7927 		mutex_enter(&qlge->gen_mutex);
7928 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7929 		    (qlge->mac_flags == QL_MAC_STARTED)) {
7930 			(void) ql_do_stop(qlge);
7931 		}
7932 		qlge->mac_flags = QL_MAC_SUSPENDED;
7933 		mutex_exit(&qlge->gen_mutex);
7934 
7935 		break;
7936 	default:
7937 		rval = DDI_FAILURE;
7938 		break;
7939 	}
7940 
7941 	return (rval);
7942 }
7943 
7944 /*
7945  * quiesce(9E) entry point.
7946  *
7947  * This function is called when the system is single-threaded at high
7948  * PIL with preemption disabled. Therefore, this function must not
7949  * block.
7950  *
7951  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7952  */
7953 int
7954 ql_quiesce(dev_info_t *dip)
7955 {
7956 	qlge_t *qlge;
7957 	int i;
7958 
7959 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7960 		return (DDI_FAILURE);
7961 
7962 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7963 		/* stop forwarding external packets to driver */
7964 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7965 		(void) ql_stop_routing(qlge);
7966 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7967 		/* Stop all the request queues */
7968 		for (i = 0; i < qlge->tx_ring_count; i++) {
7969 			if (qlge->tx_ring[i].valid_db_reg) {
7970 				ql_write_doorbell_reg(qlge,
7971 				    qlge->tx_ring[i].valid_db_reg, 0);
7972 			}
7973 		}
7974 		qlge_delay(QL_ONE_SEC_DELAY/4);
7975 		/* Interrupts not needed from now */
7976 		/* Disable MPI interrupt */
7977 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
7978 		    (INTR_MASK_PI << 16));
7979 		ql_disable_global_interrupt(qlge);
7980 
7981 		/* Disable all the rx completion queues */
7982 		for (i = 0; i < qlge->rx_ring_count; i++) {
7983 			if (qlge->rx_ring[i].valid_db_reg) {
7984 				ql_write_doorbell_reg(qlge,
7985 				    qlge->rx_ring[i].valid_db_reg, 0);
7986 			}
7987 		}
7988 		qlge_delay(QL_ONE_SEC_DELAY/4);
7989 		qlge->mac_flags = QL_MAC_STOPPED;
7990 		/* Reset adapter */
7991 		(void) ql_asic_reset(qlge);
7992 		qlge_delay(100);
7993 	}
7994 
7995 	return (DDI_SUCCESS);
7996 }
7997 
7998 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
7999 
8000 /*
8001  * Loadable Driver Interface Structures.
8002  * Declare and initialize the module configuration section...
8003  */
8004 static struct modldrv modldrv = {
8005 	&mod_driverops,		/* type of module: driver */
8006 	version,		/* name of module */
8007 	&ql_ops			/* driver dev_ops */
8008 };
8009 
8010 static struct modlinkage modlinkage = {
8011 	MODREV_1, 	&modldrv,	NULL
8012 };
8013 
8014 /*
8015  * Loadable Module Routines
8016  */
8017 
8018 /*
8019  * _init
8020  * Initializes a loadable module. It is called before any other
8021  * routine in a loadable module.
8022  */
8023 int
8024 _init(void)
8025 {
8026 	int rval;
8027 
8028 	mac_init_ops(&ql_ops, ADAPTER_NAME);
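	/* prepare ql_ops for use as a GLDv3 (mac) driver before install */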
8029 	rval = mod_install(&modlinkage);
8030 	if (rval != DDI_SUCCESS) {
8031 		mac_fini_ops(&ql_ops);
8032 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
8033 		    ADAPTER_NAME);
8034 	}
8035 
8036 	return (rval);
8037 }
8038 
8039 /*
8040  * _fini
8041  * Prepares a module for unloading. It is called when the system
8042  * wants to unload a module. If the module determines that it can
8043  * be unloaded, then _fini() returns the value returned by
8044  * mod_remove(). Upon successful return from _fini() no other
8045  * routine in the module will be called before _init() is called.
8046  */
8047 int
8048 _fini(void)
8049 {
8050 	int rval;
8051 
8052 	rval = mod_remove(&modlinkage);
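	/* only undo mac_init_ops() if the module was really removed */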
8053 	if (rval == DDI_SUCCESS) {
8054 		mac_fini_ops(&ql_ops);
8055 	}
8056 
8057 	return (rval);
8058 }
8059 
8060 /*
8061  * _info
8062  * Returns information about loadable module.
8063  */
8064 int
8065 _info(struct modinfo *modinfop)
8066 {
8067 	return (mod_info(&modlinkage, modinfop));
8068 }
8069