1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 QLogic Corporation. All rights reserved.
24  */
25 
26 #include <qlge.h>
27 #include <sys/atomic.h>
28 #include <sys/strsubr.h>
29 #include <sys/pattr.h>
30 #include <netinet/in.h>
31 #include <netinet/ip.h>
32 #include <netinet/ip6.h>
33 #include <netinet/tcp.h>
34 #include <netinet/udp.h>
35 #include <inet/ip.h>
36 
37 
38 
39 /*
40  * Local variables
41  */
42 static struct ether_addr ql_ether_broadcast_addr =
43 	{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
44 static char version[] = "GLDv3 QLogic 81XX " VERSIONSTR;
45 
46 /*
47  * Local function prototypes
48  */
49 static void ql_free_resources(qlge_t *);
50 static void ql_fini_kstats(qlge_t *);
51 static uint32_t ql_get_link_state(qlge_t *);
52 static void ql_read_conf(qlge_t *);
53 static int ql_alloc_phys(dev_info_t *, ddi_dma_handle_t *,
54     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
55     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
56 static int ql_alloc_phys_rbuf(dev_info_t *, ddi_dma_handle_t *,
57     ddi_device_acc_attr_t *, uint_t, ddi_acc_handle_t *,
58     size_t, size_t, caddr_t *, ddi_dma_cookie_t *);
59 static void ql_free_phys(ddi_dma_handle_t *, ddi_acc_handle_t *);
60 static int ql_set_routing_reg(qlge_t *, uint32_t, uint32_t, int);
61 static int ql_route_initialize(qlge_t *);
62 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
63 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
64 static int ql_bringdown_adapter(qlge_t *);
65 static int ql_bringup_adapter(qlge_t *);
66 static int ql_asic_reset(qlge_t *);
67 static void ql_wake_mpi_reset_soft_intr(qlge_t *);
68 static void ql_stop_timer(qlge_t *qlge);
69 static void ql_fm_fini(qlge_t *qlge);
70 int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring);
71 
72 /*
73  * TX DMA mapping handles allow multiple scatter-gather lists
74  */
75 ddi_dma_attr_t  tx_mapping_dma_attr = {
76 	DMA_ATTR_V0,			/* dma_attr_version */
77 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
78 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
79 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
80 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
81 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
82 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
83 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
84 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
85 	QL_MAX_TX_DMA_HANDLES,		/* s/g list length */
86 	QL_DMA_GRANULARITY,		/* granularity of device */
87 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
88 };
89 
90 /*
91  * Receive buffers and Request/Response queues do not allow scatter-gather lists
92  */
93 ddi_dma_attr_t  dma_attr = {
94 	DMA_ATTR_V0,			/* dma_attr_version */
95 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
96 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
97 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
98 	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment, default - 8 */
99 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
100 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
101 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
102 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
103 	1,				/* s/g list length, i.e. no s/g list */
104 	QL_DMA_GRANULARITY,		/* granularity of device */
105 	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
106 };
107 /*
108  * Receive buffers do not allow scatter-gather lists
109  */
110 ddi_dma_attr_t  dma_attr_rbuf = {
111 	DMA_ATTR_V0,			/* dma_attr_version */
112 	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
113 	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
114 	QL_DMA_XFER_COUNTER,		/* DMA counter register */
115 	0x1,				/* DMA address alignment, byte aligned */
116 	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
117 	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
118 	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
119 	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
120 	1,				/* s/g list length, i.e. no s/g list */
121 	QL_DMA_GRANULARITY,		/* granularity of device */
122 	DDI_DMA_RELAXED_ORDERING	/* DMA transfer flags */
123 };
124 /*
125  * DMA access attribute structure.
126  */
127 /* device register access from host */
128 ddi_device_acc_attr_t ql_dev_acc_attr = {
129 	DDI_DEVICE_ATTR_V0,
130 	DDI_STRUCTURE_LE_ACC,
131 	DDI_STRICTORDER_ACC
132 };
133 
134 /* host ring descriptors */
135 ddi_device_acc_attr_t ql_desc_acc_attr = {
136 	DDI_DEVICE_ATTR_V0,
137 	DDI_NEVERSWAP_ACC,
138 	DDI_STRICTORDER_ACC
139 };
140 
141 /* host ring buffer */
142 ddi_device_acc_attr_t ql_buf_acc_attr = {
143 	DDI_DEVICE_ATTR_V0,
144 	DDI_NEVERSWAP_ACC,
145 	DDI_STRICTORDER_ACC
146 };
147 
148 /*
149  * Hash key table for Receive Side Scaling (RSS) support
150  */
151 const uint8_t key_data[] = {
152 	0x23, 0x64, 0xa1, 0xaa, 0x37, 0xc0, 0xed, 0x05, 0x2b, 0x36,
153 	0x50, 0x5c, 0x45, 0x1e, 0x7e, 0xc8, 0x5d, 0x2a, 0x54, 0x2f,
154 	0xe4, 0x3d, 0x0f, 0xbb, 0x91, 0xd9, 0x25, 0x60, 0xd4, 0xf8,
155 	0x12, 0xa0, 0x59, 0x4b, 0x9e, 0x8a, 0x51, 0xda, 0xcd, 0x49};
156 
157 /*
158  * Shadow Registers:
159  * Outbound queues have a consumer index that is maintained by the chip.
160  * Inbound queues have a producer index that is maintained by the chip.
161  * For lower overhead, these registers are "shadowed" to host memory
162  * which allows the device driver to track the queue progress without
163  * PCI reads. When an entry is placed on an inbound queue, the chip will
164  * update the relevant index register and then copy the value to the
165  * shadow register in host memory.
166  * Currently, ql_read_sh_reg only reads the inbound queues' producer index.
167  */
168 
169 static inline unsigned int
170 ql_read_sh_reg(qlge_t *qlge, struct rx_ring *rx_ring)
171 {
172 	uint32_t rtn;
173 
174 	/* re-synchronize shadow prod index dma buffer before reading */
175 	(void) ddi_dma_sync(qlge->host_copy_shadow_dma_attr.dma_handle,
176 	    rx_ring->prod_idx_sh_reg_offset,
177 	    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
178 
179 	rtn = ddi_get32(qlge->host_copy_shadow_dma_attr.acc_handle,
180 	    (uint32_t *)rx_ring->prod_idx_sh_reg);
181 
182 	return (rtn);
183 }
184 
185 /*
186  * Read 32 bit atomically
187  */
188 uint32_t
189 ql_atomic_read_32(volatile uint32_t *target)
190 {
191 	/*
192 	 * atomic_add_32_nv returns the new value after the add;
193 	 * since we add 0, we get the current value back.
194 	 */
195 	return (atomic_add_32_nv(target, 0));
196 }
197 
198 /*
199  * Set 32 bit atomically
200  */
201 void
202 ql_atomic_set_32(volatile uint32_t *target, uint32_t newval)
203 {
204 	(void) atomic_swap_32(target, newval);
205 }
206 
207 
208 /*
209  * Setup device PCI configuration registers.
210  * Kernel context.
211  */
212 static void
213 ql_pci_config(qlge_t *qlge)
214 {
215 	uint16_t w;
216 
217 	qlge->vendor_id = (uint16_t)pci_config_get16(qlge->pci_handle,
218 	    PCI_CONF_VENID);
219 	qlge->device_id = (uint16_t)pci_config_get16(qlge->pci_handle,
220 	    PCI_CONF_DEVID);
221 
222 	/*
223 	 * We want to respect the framework's setting of the PCI
224 	 * configuration space command register and also make sure
225 	 * that all bits of interest to us are properly set in the
226 	 * PCI command register (0x04).
227 	 * PCI_COMM_IO		0x1	 I/O access enable
228 	 * PCI_COMM_MAE		0x2	 Memory access enable
229 	 * PCI_COMM_ME		0x4	 bus master enable
230 	 * PCI_COMM_MEMWR_INVAL	0x10	 memory write and invalidate enable.
231 	 */
232 	w = (uint16_t)pci_config_get16(qlge->pci_handle, PCI_CONF_COMM);
233 	w = (uint16_t)(w & (~PCI_COMM_IO));
234 	w = (uint16_t)(w | PCI_COMM_MAE | PCI_COMM_ME |
235 	    /* PCI_COMM_MEMWR_INVAL | */
236 	    PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE);
237 
238 	pci_config_put16(qlge->pci_handle, PCI_CONF_COMM, w);
239 
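	/*
	 * Note: config offset 0x54 appears to be the PCIe Device Control
	 * register on this adapter (hence the "dev_ctl" messages below);
	 * bits 14:12 select the maximum read request size, and the 101b
	 * value programmed here corresponds to 4096 bytes.
	 */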
240 	w = pci_config_get16(qlge->pci_handle, 0x54);
241 	cmn_err(CE_NOTE, "!dev_ctl old 0x%x\n", w);
242 	w = (uint16_t)(w & (~0x7000));
243 	w = (uint16_t)(w | 0x5000);
244 	pci_config_put16(qlge->pci_handle, 0x54, w);
245 	cmn_err(CE_NOTE, "!dev_ctl new 0x%x\n", w);
246 
247 	ql_dump_pci_config(qlge);
248 }
249 
250 /*
251  * This routine performs the necessary steps to set GLD MAC information
252  * such as the function number, XGMAC mask and shift bits.
253  */
254 static int
255 ql_set_mac_info(qlge_t *qlge)
256 {
257 	uint32_t value;
258 	int rval = DDI_FAILURE;
259 	uint32_t fn0_net, fn1_net;
260 
261 	/* set default value */
262 	qlge->fn0_net = FN0_NET;
263 	qlge->fn1_net = FN1_NET;
264 
265 	if (ql_read_processor_data(qlge, MPI_REG, &value) != DDI_SUCCESS) {
266 		cmn_err(CE_WARN, "%s(%d) read MPI register failed",
267 		    __func__, qlge->instance);
268 		goto exit;
269 	} else {
270 		fn0_net = (value >> 1) & 0x07;
271 		fn1_net = (value >> 5) & 0x07;
272 		if ((fn0_net > 4) || (fn1_net > 4) || (fn0_net == fn1_net)) {
273 			cmn_err(CE_WARN, "%s(%d) bad mpi register value %x,\n"
274 			    "nic0 function number %d, "
275 			    "nic1 function number %d, "
276 			    "use default\n",
277 			    __func__, qlge->instance, value, fn0_net, fn1_net);
278 			goto exit;
279 		} else {
280 			qlge->fn0_net = fn0_net;
281 			qlge->fn1_net = fn1_net;
282 		}
283 	}
284 
285 	/* Get the function number that the driver is associated with */
286 	value = ql_read_reg(qlge, REG_STATUS);
287 	qlge->func_number = (uint8_t)((value >> 6) & 0x03);
288 	QL_PRINT(DBG_INIT, ("status register is:%x, func_number: %d\n",
289 	    value, qlge->func_number));
290 
291 	/* The driver is loaded on a non-NIC function? */
292 	if ((qlge->func_number != qlge->fn0_net) &&
293 	    (qlge->func_number != qlge->fn1_net)) {
294 		cmn_err(CE_WARN,
295 		    "Invalid function number = 0x%x\n", qlge->func_number);
296 		goto exit;
297 	}
298 	/* network port 0? */
299 	if (qlge->func_number == qlge->fn0_net) {
300 		qlge->xgmac_sem_mask = QL_PORT0_XGMAC_SEM_MASK;
301 		qlge->xgmac_sem_bits = QL_PORT0_XGMAC_SEM_BITS;
302 	} else {
303 		qlge->xgmac_sem_mask = QL_PORT1_XGMAC_SEM_MASK;
304 		qlge->xgmac_sem_bits = QL_PORT1_XGMAC_SEM_BITS;
305 	}
306 	rval = DDI_SUCCESS;
307 exit:
308 	return (rval);
309 
310 }
311 
312 /*
313  * write to doorbell register
314  */
315 void
316 ql_write_doorbell_reg(qlge_t *qlge, uint32_t *addr, uint32_t data)
317 {
318 	ddi_put32(qlge->dev_doorbell_reg_handle, addr, data);
319 }
320 
321 /*
322  * read from doorbell register
323  */
324 uint32_t
325 ql_read_doorbell_reg(qlge_t *qlge, uint32_t *addr)
326 {
327 	uint32_t ret;
328 
329 	ret = ddi_get32(qlge->dev_doorbell_reg_handle, addr);
330 
331 	return	(ret);
332 }
333 
334 /*
335  * This function waits for a specific bit to come ready
336  * in a given register.  It is used mostly by the initialization
337  * process, but is also called from kernel-thread context, e.g. when
338  * updating multicast lists, MAC addresses or VLAN IDs.
339  */
340 static int
341 ql_wait_reg_rdy(qlge_t *qlge, uint32_t reg, uint32_t bit, uint32_t err_bit)
342 {
343 	uint32_t temp;
344 	int count = UDELAY_COUNT;
345 
346 	while (count) {
347 		temp = ql_read_reg(qlge, reg);
348 
349 		/* check for errors */
350 		if ((temp & err_bit) != 0) {
351 			break;
352 		} else if ((temp & bit) != 0)
353 			return (DDI_SUCCESS);
354 		qlge_delay(UDELAY_DELAY);
355 		count--;
356 	}
357 	cmn_err(CE_WARN,
358 	    "Waiting for reg %x to come ready failed.", reg);
359 	if (qlge->fm_enable) {
360 		ql_fm_ereport(qlge, DDI_FM_DEVICE_NO_RESPONSE);
361 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
362 	}
363 	return (DDI_FAILURE);
364 }
365 
366 /*
367  * The CFG register is used to download TX and RX control blocks
368  * to the chip. This function waits for an operation to complete.
369  */
370 static int
371 ql_wait_cfg(qlge_t *qlge, uint32_t bit)
372 {
373 	return (ql_wait_reg_bit(qlge, REG_CONFIGURATION, bit, BIT_RESET, 0));
374 }
375 
376 
377 /*
378  * Used to issue init control blocks to hw. Maps control block,
379  * sets address, triggers download, waits for completion.
380  */
381 static int
382 ql_write_cfg(qlge_t *qlge, uint32_t bit, uint64_t phy_addr, uint16_t q_id)
383 {
384 	int status = DDI_SUCCESS;
385 	uint32_t mask;
386 	uint32_t value;
387 
388 	status = ql_sem_spinlock(qlge, SEM_ICB_MASK);
389 	if (status != DDI_SUCCESS) {
390 		goto exit;
391 	}
392 	status = ql_wait_cfg(qlge, bit);
393 	if (status != DDI_SUCCESS) {
394 		goto exit;
395 	}
396 
397 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_LOWER, LS_64BITS(phy_addr));
398 	ql_write_reg(qlge, REG_ICB_ACCESS_ADDRESS_UPPER, MS_64BITS(phy_addr));
399 
400 	mask = CFG_Q_MASK | (bit << 16);
401 	value = bit | (q_id << CFG_Q_SHIFT);
402 	ql_write_reg(qlge, REG_CONFIGURATION, (mask | value));
403 
404 	/*
405 	 * Wait for the bit to clear after signaling hw.
406 	 */
407 	status = ql_wait_cfg(qlge, bit);
408 	ql_sem_unlock(qlge, SEM_ICB_MASK); /* does flush too */
409 
410 exit:
411 	return (status);
412 }
413 
414 /*
415  * Initialize adapter instance
416  */
417 static int
418 ql_init_instance(qlge_t *qlge)
419 {
420 	int i;
421 
422 	/* Default value */
423 	qlge->mac_flags = QL_MAC_INIT;
424 	qlge->mtu = ETHERMTU;		/* set normal size as default */
425 	qlge->page_size = VM_PAGE_SIZE;	/* default page size */
426 
427 	for (i = 0; i < MAX_RX_RINGS; i++) {
428 		qlge->rx_polls[i] = 0;
429 		qlge->rx_interrupts[i] = 0;
430 	}
431 
432 	/*
433 	 * Set up the operating parameters.
434 	 */
435 	qlge->multicast_list_count = 0;
436 
437 	/*
438 	 * Set up the max number of unicast list entries
439 	 */
440 	qlge->unicst_total = MAX_UNICAST_LIST_SIZE;
441 	qlge->unicst_avail = MAX_UNICAST_LIST_SIZE;
442 
443 	/*
444 	 * read user defined properties in .conf file
445 	 */
446 	ql_read_conf(qlge); /* mtu, pause, LSO etc */
447 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
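	/*
	 * The rx_ring array covers both the outbound (TX completion) rings,
	 * one per TX ring, and the inbound RSS rings, hence the sum above.
	 */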
448 
449 	QL_PRINT(DBG_INIT, ("mtu is %d \n", qlge->mtu));
450 
451 	/* choose Memory Space mapping and get Vendor Id, Device ID etc */
452 	ql_pci_config(qlge);
453 	qlge->ip_hdr_offset = 0;
454 
455 	if (qlge->device_id == 0x8000) {
456 		/* Schultz card */
457 		qlge->cfg_flags |= CFG_CHIP_8100;
458 		/* enable just ipv4 chksum offload for Schultz */
459 		qlge->cfg_flags |= CFG_CKSUM_FULL_IPv4;
460 		/*
461 		 * Schultz firmware does not do the pseudo IP header checksum
462 		 * calculation; it needs to be done by the driver
463 		 */
464 		qlge->cfg_flags |= CFG_HW_UNABLE_PSEUDO_HDR_CKSUM;
465 		if (qlge->lso_enable)
466 			qlge->cfg_flags |= CFG_LSO;
467 		qlge->cfg_flags |= CFG_SUPPORT_SCATTER_GATHER;
468 		/* Schultz must split packet header */
469 		qlge->cfg_flags |= CFG_ENABLE_SPLIT_HEADER;
470 		qlge->max_read_mbx = 5;
471 		qlge->ip_hdr_offset = 2;
472 	}
473 
474 	/* Set Function Number and some of the iocb mac information */
475 	if (ql_set_mac_info(qlge) != DDI_SUCCESS)
476 		return (DDI_FAILURE);
477 
478 	/* Read network settings from NVRAM */
479 	/* After nvram is read successfully, update dev_addr */
480 	if (ql_get_flash_params(qlge) == DDI_SUCCESS) {
481 		QL_PRINT(DBG_INIT, ("mac%d address is \n", qlge->func_number));
482 		for (i = 0; i < ETHERADDRL; i++) {
483 			qlge->dev_addr.ether_addr_octet[i] =
484 			    qlge->nic_config.factory_MAC[i];
485 		}
486 	} else {
487 		cmn_err(CE_WARN, "%s(%d): Failed to read flash memory",
488 		    __func__, qlge->instance);
489 		return (DDI_FAILURE);
490 	}
491 
492 	bcopy(qlge->dev_addr.ether_addr_octet,
493 	    qlge->unicst_addr[0].addr.ether_addr_octet,
494 	    ETHERADDRL);
495 	QL_DUMP(DBG_INIT, "\t flash mac address dump:\n",
496 	    &qlge->dev_addr.ether_addr_octet[0], 8, ETHERADDRL);
497 
498 	qlge->port_link_state = LS_DOWN;
499 
500 	return (DDI_SUCCESS);
501 }
502 
503 
504 /*
505  * This hardware semaphore provides the mechanism for exclusive access to
506  * resources shared between the NIC driver, MPI firmware,
507  * FCOE firmware and the FC driver.
508  */
509 static int
510 ql_sem_trylock(qlge_t *qlge, uint32_t sem_mask)
511 {
512 	uint32_t sem_bits = 0;
513 
514 	switch (sem_mask) {
515 	case SEM_XGMAC0_MASK:
516 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
517 		break;
518 	case SEM_XGMAC1_MASK:
519 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
520 		break;
521 	case SEM_ICB_MASK:
522 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
523 		break;
524 	case SEM_MAC_ADDR_MASK:
525 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
526 		break;
527 	case SEM_FLASH_MASK:
528 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
529 		break;
530 	case SEM_PROBE_MASK:
531 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
532 		break;
533 	case SEM_RT_IDX_MASK:
534 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
535 		break;
536 	case SEM_PROC_REG_MASK:
537 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
538 		break;
539 	default:
540 		cmn_err(CE_WARN, "Bad semaphore mask!");
541 		return (DDI_FAILURE);
542 	}
543 
544 	ql_write_reg(qlge, REG_SEMAPHORE, sem_bits | sem_mask);
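	/*
	 * If the semaphore bit reads back set we now own it, so the
	 * expression below returns 0 on success and non-zero on failure.
	 */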
545 	return (!(ql_read_reg(qlge, REG_SEMAPHORE) & sem_bits));
546 }
547 
548 /*
549  * Lock a specific bit of the Semaphore register to gain
550  * access to a particular shared register
551  */
552 int
553 ql_sem_spinlock(qlge_t *qlge, uint32_t sem_mask)
554 {
555 	unsigned int wait_count = 30;
556 
557 	while (wait_count) {
558 		if (!ql_sem_trylock(qlge, sem_mask))
559 			return (DDI_SUCCESS);
560 		qlge_delay(100);
561 		wait_count--;
562 	}
563 	cmn_err(CE_WARN, "%s(%d) sem_mask 0x%x lock timeout ",
564 	    __func__, qlge->instance, sem_mask);
565 	return (DDI_FAILURE);
566 }
567 
568 /*
569  * Unlock a specific bit of the Semaphore register to release
570  * access to a particular shared register
571  */
572 void
573 ql_sem_unlock(qlge_t *qlge, uint32_t sem_mask)
574 {
575 	ql_write_reg(qlge, REG_SEMAPHORE, sem_mask);
576 	(void) ql_read_reg(qlge, REG_SEMAPHORE);	/* flush */
577 }
578 
579 /*
580  * Get property value from configuration file.
581  *
582  * string = property string pointer.
583  *
584  * Returns:
585  * 0xFFFFFFFF if the property is not found, otherwise the property value.
586  */
587 static uint32_t
588 ql_get_prop(qlge_t *qlge, char *string)
589 {
590 	char buf[256];
591 	uint32_t data;
592 
593 	/* Get adapter instance parameter. */
594 	(void) sprintf(buf, "hba%d-%s", qlge->instance, string);
595 	data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0, buf,
596 	    (int)0xffffffff);
597 
598 	/* Adapter instance parameter found? */
599 	if (data == 0xffffffff) {
600 		/* No, get default parameter. */
601 		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, qlge->dip, 0,
602 		    string, (int)0xffffffff);
603 	}
604 
605 	return (data);
606 }
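
/*
 * For example, a (hypothetical) qlge.conf containing
 *	hba0-mtu=9000;
 *	mtu=1500;
 * would give instance 0 an MTU of 9000 while other instances fall back
 * to the global "mtu" entry.
 */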
607 
608 /*
609  * Read user setting from configuration file.
610  */
611 static void
612 ql_read_conf(qlge_t *qlge)
613 {
614 	uint32_t data;
615 
616 	/* clear configuration flags */
617 	qlge->cfg_flags = 0;
618 
619 	/* Set up the default ring sizes. */
620 	qlge->tx_ring_size = NUM_TX_RING_ENTRIES;
621 	data = ql_get_prop(qlge, "tx_ring_size");
622 	/* if data is valid */
623 	if ((data != 0xffffffff) && data) {
624 		if (qlge->tx_ring_size != data) {
625 			qlge->tx_ring_size = (uint16_t)data;
626 		}
627 	}
628 
629 	qlge->rx_ring_size = NUM_RX_RING_ENTRIES;
630 	data = ql_get_prop(qlge, "rx_ring_size");
631 	/* if data is valid */
632 	if ((data != 0xffffffff) && data) {
633 		if (qlge->rx_ring_size != data) {
634 			qlge->rx_ring_size = (uint16_t)data;
635 		}
636 	}
637 
638 	qlge->tx_ring_count = 8;
639 	data = ql_get_prop(qlge, "tx_ring_count");
640 	/* if data is valid */
641 	if ((data != 0xffffffff) && data) {
642 		if (qlge->tx_ring_count != data) {
643 			qlge->tx_ring_count = (uint16_t)data;
644 		}
645 	}
646 
647 	qlge->rss_ring_count = 8;
648 	data = ql_get_prop(qlge, "rss_ring_count");
649 	/* if data is valid */
650 	if ((data != 0xffffffff) && data) {
651 		if (qlge->rss_ring_count != data) {
652 			qlge->rss_ring_count = (uint16_t)data;
653 		}
654 	}
655 
656 	/* Get default rx_copy enable/disable. */
657 	if ((data = ql_get_prop(qlge, "force-rx-copy")) == 0xffffffff ||
658 	    data == 0) {
659 		qlge->cfg_flags &= ~CFG_RX_COPY_MODE;
660 		qlge->rx_copy = B_FALSE;
661 		QL_PRINT(DBG_INIT, ("rx copy mode disabled\n"));
662 	} else if (data == 1) {
663 		qlge->cfg_flags |= CFG_RX_COPY_MODE;
664 		qlge->rx_copy = B_TRUE;
665 		QL_PRINT(DBG_INIT, ("rx copy mode enabled\n"));
666 	}
667 
668 	/* Get mtu packet size. */
669 	data = ql_get_prop(qlge, "mtu");
670 	if ((data == ETHERMTU) || (data == JUMBO_MTU)) {
671 		if (qlge->mtu != data) {
672 			qlge->mtu = data;
673 			cmn_err(CE_NOTE, "new mtu is %d\n", qlge->mtu);
674 		}
675 	}
676 
677 	if (qlge->mtu == JUMBO_MTU) {
678 		qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT_JUMBO;
679 		qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT_JUMBO;
680 		qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT_JUMBO;
681 		qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT_JUMBO;
682 	}
683 
684 
685 	/* Get pause mode, default is Per Priority mode. */
686 	qlge->pause = PAUSE_MODE_PER_PRIORITY;
687 	data = ql_get_prop(qlge, "pause");
688 	if (data <= PAUSE_MODE_PER_PRIORITY) {
689 		if (qlge->pause != data) {
690 			qlge->pause = data;
691 			cmn_err(CE_NOTE, "new pause mode %d\n", qlge->pause);
692 		}
693 	}
694 	/* Receive interrupt delay */
695 	qlge->rx_coalesce_usecs = DFLT_RX_COALESCE_WAIT;
696 	data = ql_get_prop(qlge, "rx_intr_delay");
697 	/* if data is valid */
698 	if ((data != 0xffffffff) && data) {
699 		if (qlge->rx_coalesce_usecs != data) {
700 			qlge->rx_coalesce_usecs = (uint16_t)data;
701 		}
702 	}
703 	/* Rx inter-packet delay. */
704 	qlge->rx_max_coalesced_frames = DFLT_RX_INTER_FRAME_WAIT;
705 	data = ql_get_prop(qlge, "rx_ipkt_delay");
706 	/* if data is valid */
707 	if ((data != 0xffffffff) && data) {
708 		if (qlge->rx_max_coalesced_frames != data) {
709 			qlge->rx_max_coalesced_frames = (uint16_t)data;
710 		}
711 	}
712 	/* Transmit interrupt delay */
713 	qlge->tx_coalesce_usecs = DFLT_TX_COALESCE_WAIT;
714 	data = ql_get_prop(qlge, "tx_intr_delay");
715 	/* if data is valid */
716 	if ((data != 0xffffffff) && data) {
717 		if (qlge->tx_coalesce_usecs != data) {
718 			qlge->tx_coalesce_usecs = (uint16_t)data;
719 		}
720 	}
721 	/* Tx inter-packet delay. */
722 	qlge->tx_max_coalesced_frames = DFLT_TX_INTER_FRAME_WAIT;
723 	data = ql_get_prop(qlge, "tx_ipkt_delay");
724 	/* if data is valid */
725 	if ((data != 0xffffffff) && data) {
726 		if (qlge->tx_max_coalesced_frames != data) {
727 			qlge->tx_max_coalesced_frames = (uint16_t)data;
728 		}
729 	}
730 
731 	/* Get split header payload_copy_thresh. */
732 	qlge->payload_copy_thresh = DFLT_PAYLOAD_COPY_THRESH;
733 	data = ql_get_prop(qlge, "payload_copy_thresh");
734 	/* if data is valid */
735 	if ((data != 0xffffffff) && (data != 0)) {
736 		if (qlge->payload_copy_thresh != data) {
737 			qlge->payload_copy_thresh = data;
738 		}
739 	}
740 
741 	/* large send offload (LSO) capability. */
742 	qlge->lso_enable = 1;
743 	data = ql_get_prop(qlge, "lso_enable");
744 	/* if data is valid */
745 	if ((data == 0) || (data == 1)) {
746 		if (qlge->lso_enable != data) {
747 			qlge->lso_enable = (uint16_t)data;
748 		}
749 	}
750 
751 	/* dcbx capability. */
752 	qlge->dcbx_enable = 1;
753 	data = ql_get_prop(qlge, "dcbx_enable");
754 	/* if data is valid */
755 	if ((data == 0) || (data == 1)) {
756 		if (qlge->dcbx_enable != data) {
757 			qlge->dcbx_enable = (uint16_t)data;
758 		}
759 	}
760 	/* fault management enable */
761 	qlge->fm_enable = B_TRUE;
762 	data = ql_get_prop(qlge, "fm-enable");
763 	if ((data == 0x1) || (data == 0)) {
764 		qlge->fm_enable = (boolean_t)data;
765 	}
766 
767 }
768 
769 /*
770  * Enable global interrupt
771  */
772 static void
773 ql_enable_global_interrupt(qlge_t *qlge)
774 {
775 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE,
776 	    (INTR_EN_EI << 16) | INTR_EN_EI);
777 	qlge->flags |= INTERRUPTS_ENABLED;
778 }
779 
780 /*
781  * Disable global interrupt
782  */
783 static void
784 ql_disable_global_interrupt(qlge_t *qlge)
785 {
786 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, (INTR_EN_EI << 16));
787 	qlge->flags &= ~INTERRUPTS_ENABLED;
788 }
789 
790 /*
791  * Enable one ring interrupt
792  */
793 void
794 ql_enable_completion_interrupt(qlge_t *qlge, uint32_t intr)
795 {
796 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
797 
798 	QL_PRINT(DBG_INTR, ("%s(%d): To enable intr %d, irq_cnt %d \n",
799 	    __func__, qlge->instance, intr, ctx->irq_cnt));
800 
801 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
802 		/*
803 		 * Always enable if we're using MSI-X multiple interrupts and
804 		 * it's not the default (zeroth) interrupt.
805 		 */
806 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
807 		return;
808 	}
809 
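	/*
	 * For legacy/MSI interrupts and the default MSI-X vector, irq_cnt
	 * counts outstanding disables; only write the enable mask once the
	 * count drops back to zero.
	 */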
810 	if (!atomic_dec_32_nv(&ctx->irq_cnt)) {
811 		mutex_enter(&qlge->hw_mutex);
812 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_en_mask);
813 		mutex_exit(&qlge->hw_mutex);
814 		QL_PRINT(DBG_INTR,
815 		    ("%s(%d): write %x to intr enable register \n",
816 		    __func__, qlge->instance, ctx->intr_en_mask));
817 	}
818 }
819 
820 /*
821  * ql_forced_disable_completion_interrupt
822  * Called from the OS; it may be invoked without
823  * a pending interrupt, so force the disable.
824  */
825 uint32_t
826 ql_forced_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
827 {
828 	uint32_t var = 0;
829 	struct intr_ctx *ctx = qlge->intr_ctx + intr;
830 
831 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
832 	    __func__, qlge->instance, intr, ctx->irq_cnt));
833 
834 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && intr) {
835 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
836 		var = ql_read_reg(qlge, REG_STATUS);
837 		return (var);
838 	}
839 
840 	mutex_enter(&qlge->hw_mutex);
841 	ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
842 	var = ql_read_reg(qlge, REG_STATUS);
843 	mutex_exit(&qlge->hw_mutex);
844 
845 	return (var);
846 }
847 
848 /*
849  * Disable a completion interrupt
850  */
851 void
852 ql_disable_completion_interrupt(qlge_t *qlge, uint32_t intr)
853 {
854 	struct intr_ctx *ctx;
855 
856 	ctx = qlge->intr_ctx + intr;
857 	QL_PRINT(DBG_INTR, ("%s(%d): To disable intr %d, irq_cnt %d \n",
858 	    __func__, qlge->instance, intr, ctx->irq_cnt));
859 	/*
860 	 * HW disables for us if we're using MSI-X multiple interrupts and
861 	 * it's not the default (zeroth) interrupt.
862 	 */
863 	if ((qlge->intr_type == DDI_INTR_TYPE_MSIX) && (intr != 0))
864 		return;
865 
866 	if (ql_atomic_read_32(&ctx->irq_cnt) == 0) {
867 		mutex_enter(&qlge->hw_mutex);
868 		ql_write_reg(qlge, REG_INTERRUPT_ENABLE, ctx->intr_dis_mask);
869 		mutex_exit(&qlge->hw_mutex);
870 	}
871 	atomic_inc_32(&ctx->irq_cnt);
872 }
873 
874 /*
875  * Enable all completion interrupts
876  */
877 static void
878 ql_enable_all_completion_interrupts(qlge_t *qlge)
879 {
880 	int i;
881 	uint32_t value = 1;
882 
883 	for (i = 0; i < qlge->intr_cnt; i++) {
884 		/*
885 		 * Set the count to 1 for Legacy / MSI interrupts or for the
886 		 * default interrupt (0)
887 		 */
888 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0) {
889 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
890 		}
891 		ql_enable_completion_interrupt(qlge, i);
892 	}
893 }
894 
895 /*
896  * Disable all completion interrupts
897  */
898 static void
899 ql_disable_all_completion_interrupts(qlge_t *qlge)
900 {
901 	int i;
902 	uint32_t value = 0;
903 
904 	for (i = 0; i < qlge->intr_cnt; i++) {
905 
906 		/*
907 		 * Set the count to 0 for Legacy / MSI interrupts or for the
908 		 * default interrupt (0)
909 		 */
910 		if ((qlge->intr_type != DDI_INTR_TYPE_MSIX) || i == 0)
911 			ql_atomic_set_32(&qlge->intr_ctx[i].irq_cnt, value);
912 
913 		ql_disable_completion_interrupt(qlge, i);
914 	}
915 }
916 
917 /*
918  * Update small buffer queue producer index
919  */
920 static void
921 ql_update_sbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
922 {
923 	/* Update the buffer producer index */
924 	QL_PRINT(DBG_RX, ("sbq: updating prod idx = %d.\n",
925 	    rx_ring->sbq_prod_idx));
926 	ql_write_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg,
927 	    rx_ring->sbq_prod_idx);
928 }
929 
930 /*
931  * Update large buffer queue producer index
932  */
933 static void
934 ql_update_lbq_prod_idx(qlge_t *qlge, struct rx_ring *rx_ring)
935 {
936 	/* Update the buffer producer index */
937 	QL_PRINT(DBG_RX, ("lbq: updating prod idx = %d.\n",
938 	    rx_ring->lbq_prod_idx));
939 	ql_write_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg,
940 	    rx_ring->lbq_prod_idx);
941 }
942 
943 /*
944  * Add a small buffer descriptor to the end of its in-use list;
945  * assumes sbq_lock is already held
946  */
947 static void
948 ql_add_sbuf_to_in_use_list(struct rx_ring *rx_ring,
949     struct bq_desc *sbq_desc)
950 {
951 	uint32_t inuse_idx = rx_ring->sbq_use_tail;
952 
953 	rx_ring->sbuf_in_use[inuse_idx] = sbq_desc;
954 	inuse_idx++;
955 	if (inuse_idx >= rx_ring->sbq_len)
956 		inuse_idx = 0;
957 	rx_ring->sbq_use_tail = inuse_idx;
958 	atomic_inc_32(&rx_ring->sbuf_in_use_count);
959 	ASSERT(rx_ring->sbuf_in_use_count <= rx_ring->sbq_len);
960 }
961 
962 /*
963  * Get a small buffer descriptor from its in use list
964  */
965 static struct bq_desc *
966 ql_get_sbuf_from_in_use_list(struct rx_ring *rx_ring)
967 {
968 	struct bq_desc *sbq_desc = NULL;
969 	uint32_t inuse_idx;
970 
971 	/* Pick from head of in use list */
972 	inuse_idx = rx_ring->sbq_use_head;
973 	sbq_desc = rx_ring->sbuf_in_use[inuse_idx];
974 	rx_ring->sbuf_in_use[inuse_idx] = NULL;
975 
976 	if (sbq_desc != NULL) {
977 		inuse_idx++;
978 		if (inuse_idx >= rx_ring->sbq_len)
979 			inuse_idx = 0;
980 		rx_ring->sbq_use_head = inuse_idx;
981 		atomic_dec_32(&rx_ring->sbuf_in_use_count);
982 		atomic_inc_32(&rx_ring->rx_indicate);
983 		sbq_desc->upl_inuse = 1;
984 		/* if mp is NULL */
985 		if (sbq_desc->mp == NULL) {
986 			/* try to remap mp again */
987 			sbq_desc->mp =
988 			    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
989 			    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
990 		}
991 	}
992 
993 	return (sbq_desc);
994 }
995 
996 /*
997  * Add a small buffer descriptor to its free list
998  */
999 static void
1000 ql_add_sbuf_to_free_list(struct rx_ring *rx_ring,
1001     struct bq_desc *sbq_desc)
1002 {
1003 	uint32_t free_idx;
1004 
1005 	/* Add to the end of free list */
1006 	free_idx = rx_ring->sbq_free_tail;
1007 	rx_ring->sbuf_free[free_idx] = sbq_desc;
1008 	ASSERT(rx_ring->sbuf_free_count <= rx_ring->sbq_len);
1009 	free_idx++;
1010 	if (free_idx >= rx_ring->sbq_len)
1011 		free_idx = 0;
1012 	rx_ring->sbq_free_tail = free_idx;
1013 	atomic_inc_32(&rx_ring->sbuf_free_count);
1014 }
1015 
1016 /*
1017  * Get a small buffer descriptor from its free list
1018  */
1019 static struct bq_desc *
1020 ql_get_sbuf_from_free_list(struct rx_ring *rx_ring)
1021 {
1022 	struct bq_desc *sbq_desc;
1023 	uint32_t free_idx;
1024 
1025 	free_idx = rx_ring->sbq_free_head;
1026 	/* Pick from top of free list */
1027 	sbq_desc = rx_ring->sbuf_free[free_idx];
1028 	rx_ring->sbuf_free[free_idx] = NULL;
1029 	if (sbq_desc != NULL) {
1030 		free_idx++;
1031 		if (free_idx >= rx_ring->sbq_len)
1032 			free_idx = 0;
1033 		rx_ring->sbq_free_head = free_idx;
1034 		atomic_dec_32(&rx_ring->sbuf_free_count);
1035 		ASSERT(rx_ring->sbuf_free_count != 0);
1036 	}
1037 	return (sbq_desc);
1038 }
1039 
1040 /*
1041  * Add a large buffer descriptor to its in use list
1042  */
1043 static void
1044 ql_add_lbuf_to_in_use_list(struct rx_ring *rx_ring,
1045     struct bq_desc *lbq_desc)
1046 {
1047 	uint32_t inuse_idx;
1048 
1049 	inuse_idx = rx_ring->lbq_use_tail;
1050 
1051 	rx_ring->lbuf_in_use[inuse_idx] = lbq_desc;
1052 	inuse_idx++;
1053 	if (inuse_idx >= rx_ring->lbq_len)
1054 		inuse_idx = 0;
1055 	rx_ring->lbq_use_tail = inuse_idx;
1056 	atomic_inc_32(&rx_ring->lbuf_in_use_count);
1057 }
1058 
1059 /*
1060  * Get a large buffer descriptor from in use list
1061  */
1062 static struct bq_desc *
1063 ql_get_lbuf_from_in_use_list(struct rx_ring *rx_ring)
1064 {
1065 	struct bq_desc *lbq_desc;
1066 	uint32_t inuse_idx;
1067 
1068 	/* Pick from head of in use list */
1069 	inuse_idx = rx_ring->lbq_use_head;
1070 	lbq_desc = rx_ring->lbuf_in_use[inuse_idx];
1071 	rx_ring->lbuf_in_use[inuse_idx] = NULL;
1072 
1073 	if (lbq_desc != NULL) {
1074 		inuse_idx++;
1075 		if (inuse_idx >= rx_ring->lbq_len)
1076 			inuse_idx = 0;
1077 		rx_ring->lbq_use_head = inuse_idx;
1078 		atomic_dec_32(&rx_ring->lbuf_in_use_count);
1079 		atomic_inc_32(&rx_ring->rx_indicate);
1080 		lbq_desc->upl_inuse = 1;
1081 
1082 		/* if mp is NULL */
1083 		if (lbq_desc->mp == NULL) {
1084 			/* try to remap mp again */
1085 			lbq_desc->mp =
1086 			    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1087 			    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1088 		}
1089 	}
1090 	return (lbq_desc);
1091 }
1092 
1093 /*
1094  * Add a large buffer descriptor to free list
1095  */
1096 static void
1097 ql_add_lbuf_to_free_list(struct rx_ring *rx_ring,
1098     struct bq_desc *lbq_desc)
1099 {
1100 	uint32_t free_idx;
1101 
1102 	/* Add to the end of free list */
1103 	free_idx = rx_ring->lbq_free_tail;
1104 	rx_ring->lbuf_free[free_idx] = lbq_desc;
1105 	free_idx++;
1106 	if (free_idx >= rx_ring->lbq_len)
1107 		free_idx = 0;
1108 	rx_ring->lbq_free_tail = free_idx;
1109 	atomic_inc_32(&rx_ring->lbuf_free_count);
1110 	ASSERT(rx_ring->lbuf_free_count <= rx_ring->lbq_len);
1111 }
1112 
1113 /*
1114  * Get a large buffer descriptor from its free list
1115  */
1116 static struct bq_desc *
1117 ql_get_lbuf_from_free_list(struct rx_ring *rx_ring)
1118 {
1119 	struct bq_desc *lbq_desc;
1120 	uint32_t free_idx;
1121 
1122 	free_idx = rx_ring->lbq_free_head;
1123 	/* Pick from head of free list */
1124 	lbq_desc = rx_ring->lbuf_free[free_idx];
1125 	rx_ring->lbuf_free[free_idx] = NULL;
1126 
1127 	if (lbq_desc != NULL) {
1128 		free_idx++;
1129 		if (free_idx >= rx_ring->lbq_len)
1130 			free_idx = 0;
1131 		rx_ring->lbq_free_head = free_idx;
1132 		atomic_dec_32(&rx_ring->lbuf_free_count);
1133 		ASSERT(rx_ring->lbuf_free_count != 0);
1134 	}
1135 	return (lbq_desc);
1136 }
1137 
1138 /*
1139  * Return a small buffer descriptor to its free list, rearming if possible
1140  */
1141 static void
1142 ql_refill_sbuf_free_list(struct bq_desc *sbq_desc, boolean_t alloc_memory)
1143 {
1144 	struct rx_ring *rx_ring = sbq_desc->rx_ring;
1145 	uint64_t *sbq_entry;
1146 	qlge_t *qlge = (qlge_t *)rx_ring->qlge;
1147 	/*
1148 	 * Sync access
1149 	 */
1150 	mutex_enter(&rx_ring->sbq_lock);
1151 
1152 	sbq_desc->upl_inuse = 0;
1153 
1154 	/*
1155 	 * If we are freeing the buffers as a result of adapter unload, get out
1156 	 */
1157 	if ((sbq_desc->free_buf != NULL) ||
1158 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1159 		if (sbq_desc->free_buf == NULL)
1160 			atomic_dec_32(&rx_ring->rx_indicate);
1161 		mutex_exit(&rx_ring->sbq_lock);
1162 		return;
1163 	}
1164 #ifdef QLGE_LOAD_UNLOAD
1165 	if (rx_ring->rx_indicate == 0)
1166 		cmn_err(CE_WARN, "sbq: indicate wrong");
1167 #endif
1168 #ifdef QLGE_TRACK_BUFFER_USAGE
1169 	uint32_t sb_consumer_idx;
1170 	uint32_t sb_producer_idx;
1171 	uint32_t num_free_buffers;
1172 	uint32_t temp;
1173 
1174 	temp = ql_read_doorbell_reg(qlge, rx_ring->sbq_prod_idx_db_reg);
1175 	sb_producer_idx = temp & 0x0000ffff;
1176 	sb_consumer_idx = (temp >> 16);
1177 
1178 	if (sb_consumer_idx > sb_producer_idx)
1179 		num_free_buffers = NUM_SMALL_BUFFERS -
1180 		    (sb_consumer_idx - sb_producer_idx);
1181 	else
1182 		num_free_buffers = sb_producer_idx - sb_consumer_idx;
1183 
1184 	if (num_free_buffers < qlge->rx_sb_low_count[rx_ring->cq_id])
1185 		qlge->rx_sb_low_count[rx_ring->cq_id] = num_free_buffers;
1186 
1187 #endif
1188 
1189 #ifdef QLGE_LOAD_UNLOAD
1190 	if (rx_ring->rx_indicate > 0xFF000000)
1191 		cmn_err(CE_WARN, "sbq: indicate(%d) wrong: %d mac_flags %d,"
1192 		    " sbq_desc index %d.",
1193 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1194 		    sbq_desc->index);
1195 #endif
1196 	if (alloc_memory) {
1197 		sbq_desc->mp =
1198 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1199 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1200 		if (sbq_desc->mp == NULL) {
1201 			rx_ring->rx_failed_sbq_allocs++;
1202 		}
1203 	}
1204 
1205 	/* Got the packet back from the stack; decrement the rx_indicate count */
1206 	atomic_dec_32(&rx_ring->rx_indicate);
1207 
1208 	ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1209 
1210 	/* Rearm if possible */
1211 	if ((rx_ring->sbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1212 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1213 		sbq_entry = rx_ring->sbq_dma.vaddr;
1214 		sbq_entry += rx_ring->sbq_prod_idx;
1215 
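		/*
		 * Move buffers from the free list back onto the hardware
		 * small buffer queue until only MIN_BUFFERS_ARM_COUNT remain
		 * free; the producer index doorbell is rung once afterwards.
		 */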
1216 		while (rx_ring->sbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1217 			/* Get first one from free list */
1218 			sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
1219 
1220 			*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
1221 			sbq_entry++;
1222 			rx_ring->sbq_prod_idx++;
1223 			if (rx_ring->sbq_prod_idx >= rx_ring->sbq_len) {
1224 				rx_ring->sbq_prod_idx = 0;
1225 				sbq_entry = rx_ring->sbq_dma.vaddr;
1226 			}
1227 			/* Add to end of in use list */
1228 			ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
1229 		}
1230 
1231 		/* Update small buffer queue producer index */
1232 		ql_update_sbq_prod_idx(qlge, rx_ring);
1233 	}
1234 
1235 	mutex_exit(&rx_ring->sbq_lock);
1236 	QL_PRINT(DBG_RX_RING, ("%s(%d) exited, sbuf_free_count %d\n",
1237 	    __func__, qlge->instance, rx_ring->sbuf_free_count));
1238 }
1239 
1240 /*
1241  * rx recycle callback function
1242  */
1243 static void
1244 ql_release_to_sbuf_free_list(caddr_t p)
1245 {
1246 	struct bq_desc *sbq_desc = (struct bq_desc *)(void *)p;
1247 
1248 	if (sbq_desc == NULL)
1249 		return;
1250 	ql_refill_sbuf_free_list(sbq_desc, B_TRUE);
1251 }
1252 
1253 /*
1254  * Return a large buffer descriptor to its free list, rearming if possible
1255  */
1256 static void
1257 ql_refill_lbuf_free_list(struct bq_desc *lbq_desc, boolean_t alloc_memory)
1258 {
1259 	struct rx_ring *rx_ring = lbq_desc->rx_ring;
1260 	uint64_t *lbq_entry;
1261 	qlge_t *qlge = rx_ring->qlge;
1262 
1263 	/* Sync access */
1264 	mutex_enter(&rx_ring->lbq_lock);
1265 
1266 	lbq_desc->upl_inuse = 0;
1267 	/*
1268 	 * If we are freeing the buffers as a result of adapter unload, get out
1269 	 */
1270 	if ((lbq_desc->free_buf != NULL) ||
1271 	    (qlge->mac_flags == QL_MAC_DETACH)) {
1272 		if (lbq_desc->free_buf == NULL)
1273 			atomic_dec_32(&rx_ring->rx_indicate);
1274 		mutex_exit(&rx_ring->lbq_lock);
1275 		return;
1276 	}
1277 #ifdef QLGE_LOAD_UNLOAD
1278 	if (rx_ring->rx_indicate == 0)
1279 		cmn_err(CE_WARN, "lbq: indicate wrong");
1280 #endif
1281 #ifdef QLGE_TRACK_BUFFER_USAGE
1282 	uint32_t lb_consumer_idx;
1283 	uint32_t lb_producer_idx;
1284 	uint32_t num_free_buffers;
1285 	uint32_t temp;
1286 
1287 	temp = ql_read_doorbell_reg(qlge, rx_ring->lbq_prod_idx_db_reg);
1288 
1289 	lb_producer_idx = temp & 0x0000ffff;
1290 	lb_consumer_idx = (temp >> 16);
1291 
1292 	if (lb_consumer_idx > lb_producer_idx)
1293 		num_free_buffers = NUM_LARGE_BUFFERS -
1294 		    (lb_consumer_idx - lb_producer_idx);
1295 	else
1296 		num_free_buffers = lb_producer_idx - lb_consumer_idx;
1297 
1298 	if (num_free_buffers < qlge->rx_lb_low_count[rx_ring->cq_id]) {
1299 		qlge->rx_lb_low_count[rx_ring->cq_id] = num_free_buffers;
1300 	}
1301 #endif
1302 
1303 #ifdef QLGE_LOAD_UNLOAD
1304 	if (rx_ring->rx_indicate > 0xFF000000)
1305 		cmn_err(CE_WARN, "lbq: indicate(%d) wrong: %d mac_flags %d,"
1306 		    "lbq_desc index %d",
1307 		    rx_ring->cq_id, rx_ring->rx_indicate, rx_ring->mac_flags,
1308 		    lbq_desc->index);
1309 #endif
1310 	if (alloc_memory) {
1311 		lbq_desc->mp =
1312 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1313 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1314 		if (lbq_desc->mp == NULL) {
1315 			rx_ring->rx_failed_lbq_allocs++;
1316 		}
1317 	}
1318 
1319 	/* Got the packet back from the stack; decrement the rx_indicate count */
1320 	atomic_dec_32(&rx_ring->rx_indicate);
1321 
1322 	ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1323 
1324 	/* Rearm if possible */
1325 	if ((rx_ring->lbuf_free_count >= MIN_BUFFERS_FREE_COUNT) &&
1326 	    (qlge->mac_flags == QL_MAC_STARTED)) {
1327 		lbq_entry = rx_ring->lbq_dma.vaddr;
1328 		lbq_entry += rx_ring->lbq_prod_idx;
1329 		while (rx_ring->lbuf_free_count > MIN_BUFFERS_ARM_COUNT) {
1330 			/* Get first one from free list */
1331 			lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
1332 
1333 			*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
1334 			lbq_entry++;
1335 			rx_ring->lbq_prod_idx++;
1336 			if (rx_ring->lbq_prod_idx >= rx_ring->lbq_len) {
1337 				rx_ring->lbq_prod_idx = 0;
1338 				lbq_entry = rx_ring->lbq_dma.vaddr;
1339 			}
1340 
1341 			/* Add to end of in use list */
1342 			ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
1343 		}
1344 
1345 		/* Update large buffer queue producer index */
1346 		ql_update_lbq_prod_idx(rx_ring->qlge, rx_ring);
1347 	}
1348 
1349 	mutex_exit(&rx_ring->lbq_lock);
1350 	QL_PRINT(DBG_RX_RING, ("%s exited, lbuf_free_count %d\n",
1351 	    __func__, rx_ring->lbuf_free_count));
1352 }
1353 /*
1354  * rx recycle callback function
1355  */
1356 static void
1357 ql_release_to_lbuf_free_list(caddr_t p)
1358 {
1359 	struct bq_desc *lbq_desc = (struct bq_desc *)(void *)p;
1360 
1361 	if (lbq_desc == NULL)
1362 		return;
1363 	ql_refill_lbuf_free_list(lbq_desc, B_TRUE);
1364 }
1365 
1366 /*
1367  * free small buffer queue buffers
1368  */
1369 static void
1370 ql_free_sbq_buffers(struct rx_ring *rx_ring)
1371 {
1372 	struct bq_desc *sbq_desc;
1373 	uint32_t i;
1374 	uint32_t j = rx_ring->sbq_free_head;
1375 	int  force_cnt = 0;
1376 
1377 	for (i = 0; i < rx_ring->sbuf_free_count; i++) {
1378 		sbq_desc = rx_ring->sbuf_free[j];
1379 		sbq_desc->free_buf = 1;
1380 		j++;
1381 		if (j >= rx_ring->sbq_len) {
1382 			j = 0;
1383 		}
1384 		if (sbq_desc->mp != NULL) {
1385 			freemsg(sbq_desc->mp);
1386 			sbq_desc->mp = NULL;
1387 		}
1388 	}
1389 	rx_ring->sbuf_free_count = 0;
1390 
1391 	j = rx_ring->sbq_use_head;
1392 	for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
1393 		sbq_desc = rx_ring->sbuf_in_use[j];
1394 		sbq_desc->free_buf = 1;
1395 		j++;
1396 		if (j >= rx_ring->sbq_len) {
1397 			j = 0;
1398 		}
1399 		if (sbq_desc->mp != NULL) {
1400 			freemsg(sbq_desc->mp);
1401 			sbq_desc->mp = NULL;
1402 		}
1403 	}
1404 	rx_ring->sbuf_in_use_count = 0;
1405 
1406 	sbq_desc = &rx_ring->sbq_desc[0];
1407 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1408 		/*
1409 		 * Set flag so that the callback does not allocate a new buffer
1410 		 */
1411 		sbq_desc->free_buf = 1;
1412 		if (sbq_desc->upl_inuse != 0) {
1413 			force_cnt++;
1414 		}
1415 		if (sbq_desc->bd_dma.dma_handle != NULL) {
1416 			ql_free_phys(&sbq_desc->bd_dma.dma_handle,
1417 			    &sbq_desc->bd_dma.acc_handle);
1418 			sbq_desc->bd_dma.dma_handle = NULL;
1419 			sbq_desc->bd_dma.acc_handle = NULL;
1420 		}
1421 	}
1422 #ifdef QLGE_LOAD_UNLOAD
1423 	cmn_err(CE_NOTE, "sbq: free %d inuse %d force %d\n",
1424 	    rx_ring->sbuf_free_count, rx_ring->sbuf_in_use_count, force_cnt);
1425 #endif
1426 	if (rx_ring->sbuf_in_use != NULL) {
1427 		kmem_free(rx_ring->sbuf_in_use, (rx_ring->sbq_len *
1428 		    sizeof (struct bq_desc *)));
1429 		rx_ring->sbuf_in_use = NULL;
1430 	}
1431 
1432 	if (rx_ring->sbuf_free != NULL) {
1433 		kmem_free(rx_ring->sbuf_free, (rx_ring->sbq_len *
1434 		    sizeof (struct bq_desc *)));
1435 		rx_ring->sbuf_free = NULL;
1436 	}
1437 }
1438 
1439 /* Allocate small buffers */
1440 static int
1441 ql_alloc_sbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1442 {
1443 	struct bq_desc *sbq_desc;
1444 	int i;
1445 	ddi_dma_cookie_t dma_cookie;
1446 
1447 	rx_ring->sbuf_free = kmem_zalloc(rx_ring->sbq_len *
1448 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1449 	if (rx_ring->sbuf_free == NULL) {
1450 		cmn_err(CE_WARN,
1451 		    "!%s: sbuf_free_list alloc: failed",
1452 		    __func__);
1453 		rx_ring->sbuf_free_count = 0;
1454 		goto alloc_sbuf_err;
1455 	}
1456 
1457 	rx_ring->sbuf_in_use = kmem_zalloc(rx_ring->sbq_len *
1458 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1459 	if (rx_ring->sbuf_in_use == NULL) {
1460 		cmn_err(CE_WARN,
1461 		    "!%s: sbuf_inuse_list alloc: failed",
1462 		    __func__);
1463 		rx_ring->sbuf_in_use_count = 0;
1464 		goto alloc_sbuf_err;
1465 	}
1466 	rx_ring->sbq_use_head = 0;
1467 	rx_ring->sbq_use_tail = 0;
1468 	rx_ring->sbq_free_head = 0;
1469 	rx_ring->sbq_free_tail = 0;
1470 	sbq_desc = &rx_ring->sbq_desc[0];
1471 
1472 	for (i = 0; i < rx_ring->sbq_len; i++, sbq_desc++) {
1473 		/* Allocate buffer */
1474 		if (ql_alloc_phys_rbuf(qlge->dip, &sbq_desc->bd_dma.dma_handle,
1475 		    &ql_buf_acc_attr,
1476 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1477 		    &sbq_desc->bd_dma.acc_handle,
1478 		    (size_t)rx_ring->sbq_buf_size,	/* mem size */
1479 		    (size_t)0,				/* default alignment */
1480 		    (caddr_t *)&sbq_desc->bd_dma.vaddr,
1481 		    &dma_cookie) != 0) {
1482 			cmn_err(CE_WARN,
1483 			    "!%s: ddi_dma_alloc_handle: failed",
1484 			    __func__);
1485 			goto alloc_sbuf_err;
1486 		}
1487 
1488 		/* Set context for Return buffer callback */
1489 		sbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1490 		sbq_desc->rx_recycle.free_func = ql_release_to_sbuf_free_list;
1491 		sbq_desc->rx_recycle.free_arg  = (caddr_t)sbq_desc;
1492 		sbq_desc->rx_ring = rx_ring;
1493 		sbq_desc->upl_inuse = 0;
1494 		sbq_desc->free_buf = 0;
1495 
1496 		sbq_desc->mp =
1497 		    desballoc((unsigned char *)(sbq_desc->bd_dma.vaddr),
1498 		    rx_ring->sbq_buf_size, 0, &sbq_desc->rx_recycle);
1499 		if (sbq_desc->mp == NULL) {
1500 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1501 			goto alloc_sbuf_err;
1502 		}
1503 		ql_add_sbuf_to_free_list(rx_ring, sbq_desc);
1504 	}
1505 
1506 	return (DDI_SUCCESS);
1507 
1508 alloc_sbuf_err:
1509 	ql_free_sbq_buffers(rx_ring);
1510 	return (DDI_FAILURE);
1511 }
1512 
1513 static void
1514 ql_free_lbq_buffers(struct rx_ring *rx_ring)
1515 {
1516 	struct bq_desc *lbq_desc;
1517 	uint32_t i, j;
1518 	int force_cnt = 0;
1519 
1520 	j = rx_ring->lbq_free_head;
1521 	for (i = 0; i < rx_ring->lbuf_free_count; i++) {
1522 		lbq_desc = rx_ring->lbuf_free[j];
1523 		lbq_desc->free_buf = 1;
1524 		j++;
1525 		if (j >= rx_ring->lbq_len)
1526 			j = 0;
1527 		if (lbq_desc->mp != NULL) {
1528 			freemsg(lbq_desc->mp);
1529 			lbq_desc->mp = NULL;
1530 		}
1531 	}
1532 	rx_ring->lbuf_free_count = 0;
1533 
1534 	j = rx_ring->lbq_use_head;
1535 	for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
1536 		lbq_desc = rx_ring->lbuf_in_use[j];
1537 		lbq_desc->free_buf = 1;
1538 		j++;
1539 		if (j >= rx_ring->lbq_len) {
1540 			j = 0;
1541 		}
1542 		if (lbq_desc->mp != NULL) {
1543 			freemsg(lbq_desc->mp);
1544 			lbq_desc->mp = NULL;
1545 		}
1546 	}
1547 	rx_ring->lbuf_in_use_count = 0;
1548 
1549 	lbq_desc = &rx_ring->lbq_desc[0];
1550 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1551 		/* Set flag so that callback will not allocate a new buffer */
1552 		lbq_desc->free_buf = 1;
1553 		if (lbq_desc->upl_inuse != 0) {
1554 			force_cnt++;
1555 		}
1556 		if (lbq_desc->bd_dma.dma_handle != NULL) {
1557 			ql_free_phys(&lbq_desc->bd_dma.dma_handle,
1558 			    &lbq_desc->bd_dma.acc_handle);
1559 			lbq_desc->bd_dma.dma_handle = NULL;
1560 			lbq_desc->bd_dma.acc_handle = NULL;
1561 		}
1562 	}
1563 #ifdef QLGE_LOAD_UNLOAD
1564 	if (force_cnt) {
1565 		cmn_err(CE_WARN, "lbq: free %d inuse %d force %d",
1566 		    rx_ring->lbuf_free_count, rx_ring->lbuf_in_use_count,
1567 		    force_cnt);
1568 	}
1569 #endif
1570 	if (rx_ring->lbuf_in_use != NULL) {
1571 		kmem_free(rx_ring->lbuf_in_use, (rx_ring->lbq_len *
1572 		    sizeof (struct bq_desc *)));
1573 		rx_ring->lbuf_in_use = NULL;
1574 	}
1575 
1576 	if (rx_ring->lbuf_free != NULL) {
1577 		kmem_free(rx_ring->lbuf_free, (rx_ring->lbq_len *
1578 		    sizeof (struct bq_desc *)));
1579 		rx_ring->lbuf_free = NULL;
1580 	}
1581 }
1582 
1583 /* Allocate large buffers */
1584 static int
1585 ql_alloc_lbufs(qlge_t *qlge, struct rx_ring *rx_ring)
1586 {
1587 	struct bq_desc *lbq_desc;
1588 	ddi_dma_cookie_t dma_cookie;
1589 	int i;
1590 	uint32_t lbq_buf_size;
1591 
1592 	rx_ring->lbuf_free = kmem_zalloc(rx_ring->lbq_len *
1593 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1594 	if (rx_ring->lbuf_free == NULL) {
1595 		cmn_err(CE_WARN,
1596 		    "!%s: lbuf_free_list alloc: failed",
1597 		    __func__);
1598 		rx_ring->lbuf_free_count = 0;
1599 		goto alloc_lbuf_err;
1600 	}
1601 
1602 	rx_ring->lbuf_in_use = kmem_zalloc(rx_ring->lbq_len *
1603 	    sizeof (struct bq_desc *), KM_NOSLEEP);
1604 
1605 	if (rx_ring->lbuf_in_use == NULL) {
1606 		cmn_err(CE_WARN,
1607 		    "!%s: lbuf_inuse_list alloc: failed",
1608 		    __func__);
1609 		rx_ring->lbuf_in_use_count = 0;
1610 		goto alloc_lbuf_err;
1611 	}
1612 	rx_ring->lbq_use_head = 0;
1613 	rx_ring->lbq_use_tail = 0;
1614 	rx_ring->lbq_free_head = 0;
1615 	rx_ring->lbq_free_tail = 0;
1616 
1617 	lbq_buf_size = (qlge->mtu == ETHERMTU) ?
1618 	    LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE;
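	/*
	 * All large buffers in a ring share one size, chosen above for
	 * either a standard or a jumbo MTU.
	 */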
1619 
1620 	lbq_desc = &rx_ring->lbq_desc[0];
1621 	for (i = 0; i < rx_ring->lbq_len; i++, lbq_desc++) {
1622 		rx_ring->lbq_buf_size = lbq_buf_size;
1623 		/* Allocate buffer */
1624 		if (ql_alloc_phys_rbuf(qlge->dip, &lbq_desc->bd_dma.dma_handle,
1625 		    &ql_buf_acc_attr,
1626 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1627 		    &lbq_desc->bd_dma.acc_handle,
1628 		    (size_t)rx_ring->lbq_buf_size,  /* mem size */
1629 		    (size_t)0, /* default alignment */
1630 		    (caddr_t *)&lbq_desc->bd_dma.vaddr,
1631 		    &dma_cookie) != 0) {
1632 			cmn_err(CE_WARN,
1633 			    "!%s: ddi_dma_alloc_handle: failed",
1634 			    __func__);
1635 			goto alloc_lbuf_err;
1636 		}
1637 
1638 		/* Set context for Return buffer callback */
1639 		lbq_desc->bd_dma.dma_addr = dma_cookie.dmac_laddress;
1640 		lbq_desc->rx_recycle.free_func = ql_release_to_lbuf_free_list;
1641 		lbq_desc->rx_recycle.free_arg  = (caddr_t)lbq_desc;
1642 		lbq_desc->rx_ring = rx_ring;
1643 		lbq_desc->upl_inuse = 0;
1644 		lbq_desc->free_buf = 0;
1645 
1646 		lbq_desc->mp =
1647 		    desballoc((unsigned char *)(lbq_desc->bd_dma.vaddr),
1648 		    rx_ring->lbq_buf_size, 0, &lbq_desc->rx_recycle);
1649 		if (lbq_desc->mp == NULL) {
1650 			cmn_err(CE_WARN, "%s: desballoc() failed", __func__);
1651 			goto alloc_lbuf_err;
1652 		}
1653 		ql_add_lbuf_to_free_list(rx_ring, lbq_desc);
1654 	} /* For all large buffers */
1655 
1656 	return (DDI_SUCCESS);
1657 
1658 alloc_lbuf_err:
1659 	ql_free_lbq_buffers(rx_ring);
1660 	return (DDI_FAILURE);
1661 }
1662 
1663 /*
1664  * Free rx buffers
1665  */
1666 static void
1667 ql_free_rx_buffers(qlge_t *qlge)
1668 {
1669 	int i;
1670 	struct rx_ring *rx_ring;
1671 
1672 	for (i = 0; i < qlge->rx_ring_count; i++) {
1673 		rx_ring = &qlge->rx_ring[i];
1674 		if (rx_ring->type != TX_Q) {
1675 			ql_free_lbq_buffers(rx_ring);
1676 			ql_free_sbq_buffers(rx_ring);
1677 		}
1678 	}
1679 }
1680 
1681 /*
1682  * Allocate rx buffers
1683  */
1684 static int
1685 ql_alloc_rx_buffers(qlge_t *qlge)
1686 {
1687 	struct rx_ring *rx_ring;
1688 	int i;
1689 
1690 	for (i = 0; i < qlge->rx_ring_count; i++) {
1691 		rx_ring = &qlge->rx_ring[i];
1692 		if (rx_ring->type != TX_Q) {
1693 			if (ql_alloc_sbufs(qlge, rx_ring) != DDI_SUCCESS)
1694 				goto alloc_err;
1695 			if (ql_alloc_lbufs(qlge, rx_ring) != DDI_SUCCESS)
1696 				goto alloc_err;
1697 		}
1698 	}
1699 #ifdef QLGE_TRACK_BUFFER_USAGE
1700 	for (i = 0; i < qlge->rx_ring_count; i++) {
1701 		if (qlge->rx_ring[i].type == RX_Q) {
1702 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
1703 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
1704 		}
1705 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
1706 	}
1707 #endif
1708 	return (DDI_SUCCESS);
1709 
1710 alloc_err:
1711 
1712 	return (DDI_FAILURE);
1713 }
1714 
1715 /*
1716  * Initialize large buffer queue ring
1717  */
1718 static void
1719 ql_init_lbq_ring(struct rx_ring *rx_ring)
1720 {
1721 	uint16_t i;
1722 	struct bq_desc *lbq_desc;
1723 
1724 	bzero(rx_ring->lbq_desc, rx_ring->lbq_len * sizeof (struct bq_desc));
1725 	for (i = 0; i < rx_ring->lbq_len; i++) {
1726 		lbq_desc = &rx_ring->lbq_desc[i];
1727 		lbq_desc->index = i;
1728 	}
1729 }
1730 
1731 /*
1732  * Initialize small buffer queue ring
1733  */
1734 static void
1735 ql_init_sbq_ring(struct rx_ring *rx_ring)
1736 {
1737 	uint16_t i;
1738 	struct bq_desc *sbq_desc;
1739 
1740 	bzero(rx_ring->sbq_desc, rx_ring->sbq_len * sizeof (struct bq_desc));
1741 	for (i = 0; i < rx_ring->sbq_len; i++) {
1742 		sbq_desc = &rx_ring->sbq_desc[i];
1743 		sbq_desc->index = i;
1744 	}
1745 }
1746 
1747 /*
1748  * Calculate the pseudo-header checksum when the hardware cannot do it
1749  */
1750 static void
1751 ql_pseudo_cksum(uint8_t *buf)
1752 {
1753 	uint32_t cksum;
1754 	uint16_t iphl;
1755 	uint16_t proto;
1756 
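	/*
	 * The pseudo-header sum below covers the TCP/UDP length (IP total
	 * length at bytes 2-3 minus the IP header length), the protocol
	 * byte (offset 9) and the source/destination addresses (bytes
	 * 12-19), folded twice to 16 bits.
	 */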
1757 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
1758 	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
1759 	cksum += proto = buf[9];
1760 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
1761 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
1762 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
1763 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
1764 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1765 	cksum = (cksum>>16) + (cksum & 0xFFFF);
1766 
1767 	/*
1768 	 * Point it to the TCP/UDP header, and
1769 	 * update the checksum field.
1770 	 */
1771 	buf += iphl + ((proto == IPPROTO_TCP) ?
1772 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
1773 
1774 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
1775 
1776 }
1777 
1778 /*
1779  * Transmit the packet chain handed down from the stack.
1780  */
1781 mblk_t *
1782 ql_ring_tx(void *arg, mblk_t *mp)
1783 {
1784 	struct tx_ring *tx_ring = (struct tx_ring *)arg;
1785 	qlge_t *qlge = tx_ring->qlge;
1786 	mblk_t *next;
1787 	int rval;
1788 	uint32_t tx_count = 0;
1789 
1790 	if (qlge->port_link_state == LS_DOWN) {
1791 		/* cannot send packets while the link is down */
1792 		mblk_t *tp;
1793 
1794 		while (mp != NULL) {
1795 			tp = mp->b_next;
1796 			mp->b_next = NULL;
1797 			freemsg(mp);
1798 			mp = tp;
1799 		}
1800 		goto exit;
1801 	}
1802 
1803 	mutex_enter(&tx_ring->tx_lock);
1804 	/* if the MAC is not started, the driver is not ready and cannot send */
1805 	if (tx_ring->mac_flags != QL_MAC_STARTED) {
1806 		cmn_err(CE_WARN, "%s(%d) ring not started, mode %d,"
1807 		    " returning packets",
1808 		    __func__, qlge->instance, tx_ring->mac_flags);
1809 		mutex_exit(&tx_ring->tx_lock);
1810 		goto exit;
1811 	}
1812 
1813 	/* we must try to send all */
1814 	while (mp != NULL) {
1815 		/*
1816 		 * if the number of available slots is at or below the
1817 		 * threshold, then quit
1818 		 */
1819 		if (tx_ring->tx_free_count <= TX_STOP_THRESHOLD) {
1820 			tx_ring->queue_stopped = 1;
1821 			rval = DDI_FAILURE;
1822 #ifdef QLGE_LOAD_UNLOAD
1823 			cmn_err(CE_WARN, "%s(%d) no resources",
1824 			    __func__, qlge->instance);
1825 #endif
1826 			tx_ring->defer++;
1827 			/*
1828 			 * If we return the buffer, we are expected to call
1829 			 * mac_tx_ring_update() when resources become available
1830 			 */
1831 			break;
1832 		}
1833 
1834 		next = mp->b_next;
1835 		mp->b_next = NULL;
1836 
1837 		rval = ql_send_common(tx_ring, mp);
1838 
1839 		if (rval != DDI_SUCCESS) {
1840 			mp->b_next = next;
1841 			break;
1842 		}
1843 		tx_count++;
1844 		mp = next;
1845 	}
1846 
1847 	/*
1848 	 * After all msg blocks are mapped or copied to tx buffer,
1849 	 * trigger the hardware to send!
1850 	 */
1851 	if (tx_count > 0) {
1852 		ql_write_doorbell_reg(tx_ring->qlge, tx_ring->prod_idx_db_reg,
1853 		    tx_ring->prod_idx);
1854 	}
1855 
1856 	mutex_exit(&tx_ring->tx_lock);
1857 exit:
1858 	return (mp);
1859 }
1860 
1861 
1862 /*
1863  * This function builds an mblk list for the given inbound
1864  * completion.
1865  */
1866 
1867 static mblk_t *
1868 ql_build_rx_mp(qlge_t *qlge, struct rx_ring *rx_ring,
1869     struct ib_mac_iocb_rsp *ib_mac_rsp)
1870 {
1871 	mblk_t *mp = NULL;
1872 	mblk_t *mp1 = NULL;	/* packet header */
1873 	mblk_t *mp2 = NULL;	/* packet content */
1874 	struct bq_desc *lbq_desc;
1875 	struct bq_desc *sbq_desc;
1876 	uint32_t err_flag = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK);
1877 	uint32_t payload_len = le32_to_cpu(ib_mac_rsp->data_len);
1878 	uint32_t header_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1879 	uint32_t pkt_len = payload_len + header_len;
1880 	uint32_t done;
1881 	uint64_t *curr_ial_ptr;
1882 	uint32_t ial_data_addr_low;
1883 	uint32_t actual_data_addr_low;
1884 	mblk_t *mp_ial = NULL;	/* ial chained packets */
1885 	uint32_t size;
1886 
1887 	/*
1888 	 * Check if error flags are set
1889 	 */
1890 	if (err_flag != 0) {
1891 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_OVERSIZE) != 0)
1892 			rx_ring->frame_too_long++;
1893 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_UNDERSIZE) != 0)
1894 			rx_ring->frame_too_short++;
1895 		if ((err_flag & IB_MAC_IOCB_RSP_ERR_CRC) != 0)
1896 			rx_ring->fcs_err++;
1897 #ifdef QLGE_LOAD_UNLOAD
1898 		cmn_err(CE_WARN, "bad packet, type 0x%x", err_flag);
1899 #endif
1900 		QL_DUMP(DBG_RX, "qlge_ring_rx: bad response iocb dump\n",
1901 		    (uint8_t *)ib_mac_rsp, 8,
1902 		    (size_t)sizeof (struct ib_mac_iocb_rsp));
1903 	}
1904 
1905 	/* header should not be in large buffer */
1906 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL) {
1907 		cmn_err(CE_WARN, "header in large buffer or invalid!");
1908 		err_flag |= 1;
1909 	}
1910 	/* if the whole packet is bigger than the rx buffer size */
1911 	if (pkt_len > qlge->max_frame_size) {
1912 		cmn_err(CE_WARN, "ql_build_rx_mpframe too long(%d)!", pkt_len);
1913 		err_flag |= 1;
1914 	}
1915 
1916 	/*
1917 	 * Handle the header buffer if present.
1918 	 * The packet header must be valid and saved in one small buffer;
1919 	 * broadcast/multicast packet headers are not split
1920 	 */
1921 	if ((ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) &&
1922 	    (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1923 		QL_PRINT(DBG_RX, ("Header of %d bytes in small buffer.\n",
1924 		    header_len));
1925 		/* Sync access */
1926 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1927 
1928 		ASSERT(sbq_desc != NULL);
1929 
1930 		/*
1931 		 * Validate addresses from the ASIC with the
1932 		 * expected sbuf address
1933 		 */
1934 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
1935 		    != ib_mac_rsp->hdr_addr) {
1936 			/* Small buffer address mismatch */
1937 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
1938 			    " in wrong small buffer",
1939 			    __func__, qlge->instance, rx_ring->cq_id);
1940 			goto fatal_error;
1941 		}
1942 		/* get this packet */
1943 		mp1 = sbq_desc->mp;
1944 		if ((err_flag != 0) || (mp1 == NULL)) {
1945 			/* failed on this packet, put it back for re-arming */
1946 #ifdef QLGE_LOAD_UNLOAD
1947 			cmn_err(CE_WARN, "get header from small buffer fail");
1948 #endif
1949 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
1950 			mp1 = NULL;
1951 		} else {
1952 			/* Flush DMA'd data */
1953 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
1954 			    0, header_len, DDI_DMA_SYNC_FORKERNEL);
1955 
1956 			if ((qlge->ip_hdr_offset != 0) &&
1957 			    (header_len < SMALL_BUFFER_SIZE)) {
1958 				/*
1959 				 * copy the entire header to a 2-byte boundary
1960 				 * address for 8100 adapters so that the IP
1961 				 * header lands on a 4-byte boundary address
1962 				 */
1963 				bcopy(mp1->b_rptr,
1964 				    (mp1->b_rptr + SMALL_BUFFER_SIZE +
1965 				    qlge->ip_hdr_offset),
1966 				    header_len);
1967 				mp1->b_rptr += SMALL_BUFFER_SIZE +
1968 				    qlge->ip_hdr_offset;
1969 			}
1970 
1971 			/*
1972 			 * Adjust the write pointer to cover
1973 			 * the packet header length
1974 			 */
1975 			mp1->b_wptr = mp1->b_rptr + header_len;
1976 			mp1->b_next = mp1->b_cont = NULL;
1977 			QL_DUMP(DBG_RX, "\t RX packet header dump:\n",
1978 			    (uint8_t *)mp1->b_rptr, 8, header_len);
1979 		}
1980 	}
1981 
1982 	/*
1983 	 * The packet payload (or the whole packet) can be in a small
1984 	 * buffer or in one or more large buffers
1985 	 */
1986 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1987 		/*
1988 		 * The data is in a single small buffer.
1989 		 */
1990 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
1991 
1992 		ASSERT(sbq_desc != NULL);
1993 
1994 		QL_PRINT(DBG_RX,
1995 		    ("%d bytes in a single small buffer, sbq_desc = %p, "
1996 		    "sbq_desc->bd_dma.dma_addr = %x,"
1997 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
1998 		    payload_len, sbq_desc, sbq_desc->bd_dma.dma_addr,
1999 		    ib_mac_rsp->data_addr, sbq_desc->mp));
2000 
2001 		/*
2002 		 * Validate addresses from the ASIC with the
2003 		 * expected sbuf address
2004 		 */
2005 		if (cpu_to_le64(sbq_desc->bd_dma.dma_addr)
2006 		    != ib_mac_rsp->data_addr) {
2007 			/* Small buffer address mismatch */
2008 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2009 			    " in wrong small buffer",
2010 			    __func__, qlge->instance, rx_ring->cq_id);
2011 			goto fatal_error;
2012 		}
2013 		/* get this packet */
2014 		mp2 = sbq_desc->mp;
2015 		if ((err_flag != 0) || (mp2 == NULL)) {
2016 #ifdef QLGE_LOAD_UNLOAD
2017 			/* failed on this packet, put it back for re-arming */
2018 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2019 #endif
2020 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2021 			mp2 = NULL;
2022 		} else {
2023 			/* Adjust the buffer length to match the payload_len */
2024 			mp2->b_wptr = mp2->b_rptr + payload_len;
2025 			mp2->b_next = mp2->b_cont = NULL;
2026 			/* Flush DMA'd data */
2027 			(void) ddi_dma_sync(sbq_desc->bd_dma.dma_handle,
2028 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2029 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2030 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2031 			/*
2032 			 * if the payload is small enough, copy it to
2033 			 * the end of the packet header
2034 			 */
2035 			if ((mp1 != NULL) &&
2036 			    (payload_len <= qlge->payload_copy_thresh) &&
2037 			    (pkt_len <
2038 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2039 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2040 				mp1->b_wptr += payload_len;
2041 				freemsg(mp2);
2042 				mp2 = NULL;
2043 			}
2044 		}
2045 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2046 		/*
2047 		 * The data is in a single large buffer.
2048 		 */
2049 		lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2050 
2051 		QL_PRINT(DBG_RX,
2052 		    ("%d bytes in a single large buffer, lbq_desc = %p, "
2053 		    "lbq_desc->bd_dma.dma_addr = %x,"
2054 		    " ib_mac_rsp->data_addr = %x, mp = %p\n",
2055 		    payload_len, lbq_desc, lbq_desc->bd_dma.dma_addr,
2056 		    ib_mac_rsp->data_addr, lbq_desc->mp));
2057 
2058 		ASSERT(lbq_desc != NULL);
2059 
2060 		/*
2061 		 * Validate addresses from the ASIC with
2062 		 * the expected lbuf address
2063 		 */
2064 		if (cpu_to_le64(lbq_desc->bd_dma.dma_addr)
2065 		    != ib_mac_rsp->data_addr) {
2066 			/* Large buffer address mismatch */
2067 			cmn_err(CE_WARN, "%s(%d) ring%d packet saved"
2068 			    " in wrong large buffer",
2069 			    __func__, qlge->instance, rx_ring->cq_id);
2070 			goto fatal_error;
2071 		}
2072 		mp2 = lbq_desc->mp;
2073 		if ((err_flag != 0) || (mp2 == NULL)) {
2074 #ifdef QLGE_LOAD_UNLOAD
2075 			cmn_err(CE_WARN, "ignore bad data from large buffer");
2076 #endif
2077 			/* failed on this packet, put it back for re-arming */
2078 			ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2079 			mp2 = NULL;
2080 		} else {
2081 			/*
2082 			 * Adjust the buffer length to match
2083 			 * the packet payload_len
2084 			 */
2085 			mp2->b_wptr = mp2->b_rptr + payload_len;
2086 			mp2->b_next = mp2->b_cont = NULL;
2087 			/* Flush DMA'd data */
2088 			(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2089 			    0, payload_len, DDI_DMA_SYNC_FORKERNEL);
2090 			QL_DUMP(DBG_RX, "\t RX packet payload dump:\n",
2091 			    (uint8_t *)mp2->b_rptr, 8, payload_len);
2092 			/*
2093 			 * if the payload is small enough, copy it to
2094 			 * the end of the packet header
2095 			 */
2096 			if ((mp1 != NULL) &&
2097 			    (payload_len <= qlge->payload_copy_thresh) &&
2098 			    (pkt_len <
2099 			    (SMALL_BUFFER_SIZE - qlge->ip_hdr_offset))) {
2100 				bcopy(mp2->b_rptr, mp1->b_wptr, payload_len);
2101 				mp1->b_wptr += payload_len;
2102 				freemsg(mp2);
2103 				mp2 = NULL;
2104 			}
2105 		}
2106 	} else if (payload_len) {
2107 		/*
2108 		 * payload present but not in a small or large buffer,
2109 		 * so its large-buffer addresses are chained through an IAL
2110 		 */
2111 #ifdef QLGE_LOAD_UNLOAD
2112 		cmn_err(CE_NOTE, "packet chained in IAL \n");
2113 #endif
2114 		/* lrg buf addresses are saved in one small buffer */
2115 		sbq_desc = ql_get_sbuf_from_in_use_list(rx_ring);
2116 		curr_ial_ptr = (uint64_t *)sbq_desc->bd_dma.vaddr;
2117 		done = 0;
2118 		while (!done) {
2119 			ial_data_addr_low =
2120 			    (uint32_t)(le64_to_cpu(*curr_ial_ptr) &
2121 			    0xFFFFFFFE);
2122 			/* check if this is the last packet fragment */
2123 			done = (uint32_t)(le64_to_cpu(*curr_ial_ptr) & 1);
2124 			curr_ial_ptr++;
2125 			/*
2126 			 * The data is in one or several large buffer(s).
2127 			 */
2128 			lbq_desc = ql_get_lbuf_from_in_use_list(rx_ring);
2129 			actual_data_addr_low =
2130 			    (uint32_t)(lbq_desc->bd_dma.dma_addr &
2131 			    0xFFFFFFFE);
2132 			if (ial_data_addr_low != actual_data_addr_low) {
2133 				cmn_err(CE_WARN,
2134 				    "packet saved in wrong ial lrg buffer"
2135 				    " expected %x, actual %lx",
2136 				    ial_data_addr_low,
2137 				    (uintptr_t)lbq_desc->bd_dma.dma_addr);
2138 				goto fatal_error;
2139 			}
2140 
2141 			size = (payload_len < rx_ring->lbq_buf_size) ?
2142 			    payload_len : rx_ring->lbq_buf_size;
2143 			payload_len -= size;
2144 			mp2 = lbq_desc->mp;
2145 			if ((err_flag != 0) || (mp2 == NULL)) {
2146 #ifdef QLGE_LOAD_UNLOAD
2147 				cmn_err(CE_WARN,
2148 				    "ignore bad data from large buffer");
2149 #endif
2150 				ql_refill_lbuf_free_list(lbq_desc, B_FALSE);
2151 				mp2 = NULL;
2152 			} else {
2153 				if (mp_ial == NULL) {
2154 					mp_ial = mp2;
2155 				} else {
2156 					linkb(mp_ial, mp2);
2157 				}
2158 
2159 				mp2->b_next = NULL;
2160 				mp2->b_cont = NULL;
2161 				mp2->b_wptr = mp2->b_rptr + size;
2162 				/* Flush DMA'd data */
2163 				(void) ddi_dma_sync(lbq_desc->bd_dma.dma_handle,
2164 				    0, size, DDI_DMA_SYNC_FORKERNEL);
2165 				QL_PRINT(DBG_RX, ("ial %d payload received \n",
2166 				    size));
2167 				QL_DUMP(DBG_RX, "\t Mac data dump:\n",
2168 				    (uint8_t *)mp2->b_rptr, 8, size);
2169 			}
2170 		}
2171 		if (err_flag != 0) {
2172 #ifdef QLGE_LOAD_UNLOAD
2173 			/* failed on this packet, put it back for re-arming */
2174 			cmn_err(CE_WARN, "ignore bad data from small buffer");
2175 #endif
2176 			ql_refill_sbuf_free_list(sbq_desc, B_FALSE);
2177 		} else {
2178 			mp2 = mp_ial;
2179 			freemsg(sbq_desc->mp);
2180 		}
2181 	}
2182 	/*
2183 	 * If the packet header was not split, send mp2 upstream by itself;
2184 	 * otherwise append the payload mblk mp2 to the header mblk mp1
2185 	 */
2186 	if (!err_flag) {
2187 		if (mp1) {
2188 			if (mp2) {
2189 				QL_PRINT(DBG_RX, ("packet in mp1 and mp2\n"));
2190 				linkb(mp1, mp2); /* mp1->b_cont = mp2; */
2191 				mp = mp1;
2192 			} else {
2193 				QL_PRINT(DBG_RX, ("packet in mp1 only\n"));
2194 				mp = mp1;
2195 			}
2196 		} else if (mp2) {
2197 			QL_PRINT(DBG_RX, ("packet in mp2 only\n"));
2198 			mp = mp2;
2199 		}
2200 	}
2201 	return (mp);
2202 
2203 fatal_error:
2204 	/* fatal Error! */
2205 	if (qlge->fm_enable) {
2206 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2207 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2208 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2209 	}
2210 	/* *mp->b_wptr = 0; */
2211 	ql_wake_asic_reset_soft_intr(qlge);
2212 	return (NULL);
2213 
2214 }
2215 
2216 /*
2217  * Bump completion queue consumer index.
2218  */
2219 static void
2220 ql_update_cq(struct rx_ring *rx_ring)
2221 {
2222 	rx_ring->cnsmr_idx++;
2223 	rx_ring->curr_entry++;
2224 	if (rx_ring->cnsmr_idx >= rx_ring->cq_len) {
2225 		rx_ring->cnsmr_idx = 0;
2226 		rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
2227 	}
2228 }
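/*
 * Example of the wrap-around above with a hypothetical cq_len of 256:
 * cnsmr_idx advances 253, 254, 255 and then resets to 0, at which point
 * curr_entry is pointed back at the base of the completion queue
 * (cq_dma.vaddr), so the completion ring is walked circularly.
 */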
2229 
2230 /*
2231  * Write the completion queue consumer index to its doorbell register.
2232  */
2233 static void
2234 ql_write_cq_idx(struct rx_ring *rx_ring)
2235 {
2236 	qlge_t *qlge = rx_ring->qlge;
2237 
2238 	ql_write_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg,
2239 	    rx_ring->cnsmr_idx);
2240 }
2241 
2242 /*
2243  * Process a System/Chip Event Notification completion event.
2244  * The incoming event describes a link up/down transition or
2245  * reports an error condition.
2246  */
2247 static void
2248 ql_process_chip_ae_intr(qlge_t *qlge,
2249     struct ib_sys_event_iocb_rsp *ib_sys_event_rsp_ptr)
2250 {
2251 	uint8_t eventType = ib_sys_event_rsp_ptr->event_type;
2252 	uint32_t soft_req = 0;
2253 
2254 	switch (eventType) {
2255 		case SYS_EVENT_PORT_LINK_UP:	/* 0x0h */
2256 			QL_PRINT(DBG_MBX, ("Port Link Up\n"));
2257 			break;
2258 
2259 		case SYS_EVENT_PORT_LINK_DOWN:	/* 0x1h */
2260 			QL_PRINT(DBG_MBX, ("Port Link Down\n"));
2261 			break;
2262 
2263 		case SYS_EVENT_MULTIPLE_CAM_HITS: /* 0x6h */
2264 			cmn_err(CE_WARN, "A multiple CAM hits lookup error "
2265 			    "occurred");
2266 			soft_req |= NEED_HW_RESET;
2267 			break;
2268 
2269 		case SYS_EVENT_SOFT_ECC_ERR:	/* 0x7h */
2270 			cmn_err(CE_WARN, "Soft ECC error detected");
2271 			soft_req |= NEED_HW_RESET;
2272 			break;
2273 
2274 		case SYS_EVENT_MGMT_FATAL_ERR:	/* 0x8h */
2275 			cmn_err(CE_WARN, "Management (MPI) Processor fatal"
2276 			    " error occurred");
2277 			soft_req |= NEED_MPI_RESET;
2278 			break;
2279 
2280 		case SYS_EVENT_MAC_INTERRUPT:	/* 0x9h */
2281 			QL_PRINT(DBG_MBX, ("MAC Interrupt"));
2282 			break;
2283 
2284 		case SYS_EVENT_PCI_ERR_READING_SML_LRG_BUF:	/* 0x40h */
2285 			cmn_err(CE_WARN, "PCI Error reading small/large "
2286 			    "buffers occurred");
2287 			soft_req |= NEED_HW_RESET;
2288 			break;
2289 
2290 		default:
2291 			QL_PRINT(DBG_RX, ("%s(%d) unknown Sys Event: "
2292 			    "type 0x%x occurred",
2293 			    __func__, qlge->instance, eventType));
2294 			break;
2295 	}
2296 
2297 	if ((soft_req & NEED_MPI_RESET) != 0) {
2298 		ql_wake_mpi_reset_soft_intr(qlge);
2299 		if (qlge->fm_enable) {
2300 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2301 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2302 		}
2303 	} else if ((soft_req & NEED_HW_RESET) != 0) {
2304 		ql_wake_asic_reset_soft_intr(qlge);
2305 		if (qlge->fm_enable) {
2306 			ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2307 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
2308 		}
2309 	}
2310 }
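/*
 * Summary of the recovery paths chosen above: soft ECC errors, multiple CAM
 * hits and PCI errors reading the small/large buffers schedule a full ASIC
 * reset (ql_wake_asic_reset_soft_intr), a fatal MPI error schedules only an
 * MPI RISC reset (ql_wake_mpi_reset_soft_intr), and link up/down and MAC
 * interrupt events are informational and trigger no reset.
 */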
2311 
2312 /*
2313  * set received packet checksum flag
2314  */
2315 void
2316 ql_set_rx_cksum(mblk_t *mp, struct ib_mac_iocb_rsp *net_rsp)
2317 {
2318 	uint32_t flags;
2319 
2320 	/* Not TCP or UDP packet? nothing more to do */
2321 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) == 0) &&
2322 	    ((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) == 0))
2323 		return;
2324 
2325 	/* No CKO support for IPv6 */
2326 	if ((net_rsp->flags3 & IB_MAC_IOCB_RSP_V6) != 0)
2327 		return;
2328 
2329 	/*
2330 	 * If checksum error, don't set flags; stack will calculate
2331 	 * checksum, detect the error and update statistics
2332 	 */
2333 	if (((net_rsp->flags1 & IB_MAC_IOCB_RSP_TE) != 0) ||
2334 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_IE) != 0))
2335 		return;
2336 
2337 	/* TCP or UDP packet and checksum valid */
2338 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_T) != 0) &&
2339 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2340 		flags = HCK_FULLCKSUM_OK;
2341 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2342 	}
2343 	if (((net_rsp->flags2 & IB_MAC_IOCB_RSP_U) != 0) &&
2344 	    ((net_rsp->flags1 & IB_MAC_IOCB_RSP_NU) == 0)) {
2345 		flags = HCK_FULLCKSUM_OK;
2346 		mac_hcksum_set(mp, 0, 0, 0, 0, flags);
2347 	}
2348 }
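/*
 * HCK_FULLCKSUM_OK tells the stack that the hardware verified the full
 * TCP/UDP checksum, so no software verification is needed; the zero
 * start/stuff/end/value arguments to mac_hcksum_set() are not needed for
 * that flag.
 */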
2349 
2350 /*
2351  * This function walks the hardware completion entries of one specified
2352  * rx ring and receives the data for each entry that is ready.
2353  * It returns a chain of mblks containing the received data, to be
2354  * passed up to mac_rx_ring().
2355  */
2356 mblk_t *
2357 ql_ring_rx(struct rx_ring *rx_ring, int poll_bytes)
2358 {
2359 	qlge_t *qlge = rx_ring->qlge;
2360 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2361 	struct ib_mac_iocb_rsp *net_rsp;
2362 	mblk_t *mp;
2363 	mblk_t *mblk_head;
2364 	mblk_t **mblk_tail;
2365 	uint32_t received_bytes = 0;
2366 	uint32_t length;
2367 #ifdef QLGE_PERFORMANCE
2368 	uint32_t pkt_ct = 0;
2369 #endif
2370 
2371 #ifdef QLGE_TRACK_BUFFER_USAGE
2372 	uint32_t consumer_idx;
2373 	uint32_t producer_idx;
2374 	uint32_t num_free_entries;
2375 	uint32_t temp;
2376 
2377 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2378 	consumer_idx = temp & 0x0000ffff;
2379 	producer_idx = (temp >> 16);
2380 
2381 	if (consumer_idx > producer_idx)
2382 		num_free_entries = (consumer_idx - producer_idx);
2383 	else
2384 		num_free_entries = NUM_RX_RING_ENTRIES - (
2385 		    producer_idx - consumer_idx);
2386 
2387 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2388 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2389 
2390 #endif
2391 	mblk_head = NULL;
2392 	mblk_tail = &mblk_head;
2393 
2394 	while ((prod != rx_ring->cnsmr_idx)) {
2395 		QL_PRINT(DBG_RX,
2396 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n",
2397 		    __func__, rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2398 
2399 		net_rsp = (struct ib_mac_iocb_rsp *)rx_ring->curr_entry;
2400 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2401 		    (off_t)((uintptr_t)net_rsp -
2402 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2403 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2404 		QL_DUMP(DBG_RX, "qlge_ring_rx: rx completion iocb\n",
2405 		    rx_ring->curr_entry, 8, (size_t)sizeof (*net_rsp));
2406 
2407 		switch (net_rsp->opcode) {
2408 
2409 		case OPCODE_IB_MAC_IOCB:
2410 			/* Adding length of pkt header and payload */
2411 			length = le32_to_cpu(net_rsp->data_len) +
2412 			    le32_to_cpu(net_rsp->hdr_len);
2413 			if ((poll_bytes != QLGE_POLL_ALL) &&
2414 			    ((received_bytes + length) > poll_bytes)) {
2415 				continue;
2416 			}
2417 			received_bytes += length;
2418 
2419 #ifdef QLGE_PERFORMANCE
2420 			pkt_ct++;
2421 #endif
2422 			mp = ql_build_rx_mp(qlge, rx_ring, net_rsp);
2423 			if (mp != NULL) {
2424 				if (rx_ring->mac_flags != QL_MAC_STARTED) {
2425 					/*
2426 					 * Increment number of packets we have
2427 					 * indicated to the stack, should be
2428 					 * decremented when we get it back
2429 					 * or when freemsg is called
2430 					 */
2431 					ASSERT(rx_ring->rx_indicate
2432 					    <= rx_ring->cq_len);
2433 #ifdef QLGE_LOAD_UNLOAD
2434 					cmn_err(CE_WARN, "%s do not send to OS,"
2435 					    " mac_flags %d, indicate %d",
2436 					    __func__, rx_ring->mac_flags,
2437 					    rx_ring->rx_indicate);
2438 #endif
2439 					QL_PRINT(DBG_RX,
2440 					    ("cq_id = %d, packet "
2441 					    "dropped, mac not "
2442 					    "enabled.\n",
2443 					    rx_ring->cq_id));
2444 					rx_ring->rx_pkt_dropped_mac_unenabled++;
2445 
2446 					/* rx_lock is expected to be held */
2447 					mutex_exit(&rx_ring->rx_lock);
2448 					freemsg(mp);
2449 					mutex_enter(&rx_ring->rx_lock);
2450 					mp = NULL;
2451 				}
2452 
2453 				if (mp != NULL) {
2454 					/*
2455 					 * Let the stack know whether the
2456 					 * hardware verified the IP and
2457 					 * TCP/UDP checksums of this packet
2458 					 */
2459 					ql_set_rx_cksum(mp, net_rsp);
2460 
2461 					rx_ring->rx_packets++;
2462 					rx_ring->rx_bytes += length;
2463 					*mblk_tail = mp;
2464 					mblk_tail = &mp->b_next;
2465 				}
2466 			} else {
2467 				QL_PRINT(DBG_RX,
2468 				    ("cq_id = %d, packet dropped\n",
2469 				    rx_ring->cq_id));
2470 				rx_ring->rx_packets_dropped_no_buffer++;
2471 			}
2472 			break;
2473 
2474 		case OPCODE_IB_SYS_EVENT_IOCB:
2475 			ql_process_chip_ae_intr(qlge,
2476 			    (struct ib_sys_event_iocb_rsp *)
2477 			    net_rsp);
2478 			break;
2479 
2480 		default:
2481 			cmn_err(CE_WARN,
2482 			    "%s Ring(%d)Hit default case, not handled!"
2483 			    " dropping the packet, "
2484 			    "opcode = %x.", __func__, rx_ring->cq_id,
2485 			    net_rsp->opcode);
2486 			break;
2487 		}
2488 		/* increment cnsmr_idx and curr_entry */
2489 		ql_update_cq(rx_ring);
2490 		prod = ql_read_sh_reg(qlge, rx_ring);
2491 
2492 	}
2493 
2494 #ifdef QLGE_PERFORMANCE
2495 	/* histogram of packets processed per call, capped at bucket 7 */
2496 	rx_ring->hist[(pkt_ct > 7) ? 7 : pkt_ct]++;
2511 #endif
2512 
2513 	/* update cnsmr_idx */
2514 	ql_write_cq_idx(rx_ring);
2515 	/* do not enable interrupt for polling mode */
2516 	if (poll_bytes == QLGE_POLL_ALL)
2517 		ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2518 	return (mblk_head);
2519 }
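/*
 * ql_ring_rx() is driven from two paths: the interrupt handlers pass
 * QLGE_POLL_ALL, drain everything that is ready and re-enable the
 * completion interrupt, while ql_ring_rx_poll() passes a byte budget and
 * leaves the interrupt disabled so the mac layer can keep polling.
 */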
2520 
2521 /* Process an outbound completion from an rx ring. */
2522 static void
2523 ql_process_mac_tx_intr(qlge_t *qlge, struct ob_mac_iocb_rsp *mac_rsp)
2524 {
2525 	struct tx_ring *tx_ring;
2526 	struct tx_ring_desc *tx_ring_desc;
2527 	int j;
2528 
2529 	tx_ring = &qlge->tx_ring[mac_rsp->txq_idx];
2530 	tx_ring_desc = tx_ring->wq_desc;
2531 	tx_ring_desc += mac_rsp->tid;
2532 
2533 	if (tx_ring_desc->tx_type == USE_DMA) {
2534 		QL_PRINT(DBG_TX, ("%s(%d): tx type USE_DMA\n",
2535 		    __func__, qlge->instance));
2536 
2537 		/*
2538 		 * Release the DMA resource that is used for
2539 		 * DMA binding.
2540 		 */
2541 		for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
2542 			(void) ddi_dma_unbind_handle(
2543 			    tx_ring_desc->tx_dma_handle[j]);
2544 		}
2545 
2546 		tx_ring_desc->tx_dma_handle_used = 0;
2547 		/*
2548 		 * Free the mblk now that the send has completed
2549 		 */
2550 		if (tx_ring_desc->mp != NULL) {
2551 			freemsg(tx_ring_desc->mp);
2552 			tx_ring_desc->mp = NULL;
2553 		}
2554 	}
2555 
2556 	tx_ring->obytes += tx_ring_desc->tx_bytes;
2557 	tx_ring->opackets++;
2558 
2559 	if (mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | OB_MAC_IOCB_RSP_S |
2560 	    OB_MAC_IOCB_RSP_L | OB_MAC_IOCB_RSP_B)) {
2561 		tx_ring->errxmt++;
2562 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2563 			/* EMPTY */
2564 			QL_PRINT(DBG_TX,
2565 			    ("Total descriptor length did not match "
2566 			    "transfer length.\n"));
2567 		}
2568 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2569 			/* EMPTY */
2570 			QL_PRINT(DBG_TX,
2571 			    ("Frame too short to be legal, not sent.\n"));
2572 		}
2573 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2574 			/* EMPTY */
2575 			QL_PRINT(DBG_TX,
2576 			    ("Frame too long, but sent anyway.\n"));
2577 		}
2578 		if (mac_rsp->flags3 & OB_MAC_IOCB_RSP_B) {
2579 			/* EMPTY */
2580 			QL_PRINT(DBG_TX,
2581 			    ("PCI backplane error. Frame not sent.\n"));
2582 		}
2583 	}
2584 	atomic_inc_32(&tx_ring->tx_free_count);
2585 }
2586 
2587 /*
2588  * clean up tx completion iocbs
2589  */
2590 int
2591 ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2592 {
2593 	qlge_t *qlge = rx_ring->qlge;
2594 	uint32_t prod = ql_read_sh_reg(qlge, rx_ring);
2595 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2596 	int count = 0;
2597 	struct tx_ring *tx_ring;
2598 	boolean_t resume_tx = B_FALSE;
2599 
2600 	mutex_enter(&rx_ring->rx_lock);
2601 #ifdef QLGE_TRACK_BUFFER_USAGE
2602 	{
2603 	uint32_t consumer_idx;
2604 	uint32_t producer_idx;
2605 	uint32_t num_free_entries;
2606 	uint32_t temp;
2607 
2608 	temp = ql_read_doorbell_reg(qlge, rx_ring->cnsmr_idx_db_reg);
2609 	consumer_idx = temp & 0x0000ffff;
2610 	producer_idx = (temp >> 16);
2611 
2612 	if (consumer_idx > producer_idx)
2613 		num_free_entries = (consumer_idx - producer_idx);
2614 	else
2615 		num_free_entries = NUM_RX_RING_ENTRIES -
2616 		    (producer_idx - consumer_idx);
2617 
2618 	if (num_free_entries < qlge->cq_low_count[rx_ring->cq_id])
2619 		qlge->cq_low_count[rx_ring->cq_id] = num_free_entries;
2620 
2621 	}
2622 #endif
2623 	/* While there are entries in the completion queue. */
2624 	while (prod != rx_ring->cnsmr_idx) {
2625 
2626 		QL_PRINT(DBG_RX,
2627 		    ("%s cq_id = %d, prod = %d, cnsmr = %d.\n", __func__,
2628 		    rx_ring->cq_id, prod, rx_ring->cnsmr_idx));
2629 
2630 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2631 		(void) ddi_dma_sync(rx_ring->cq_dma.dma_handle,
2632 		    (off_t)((uintptr_t)net_rsp -
2633 		    (uintptr_t)rx_ring->cq_dma.vaddr),
2634 		    (size_t)sizeof (*net_rsp), DDI_DMA_SYNC_FORKERNEL);
2635 
2636 		QL_DUMP(DBG_RX, "ql_clean_outbound_rx_ring: "
2637 		    "response packet data\n",
2638 		    rx_ring->curr_entry, 8,
2639 		    (size_t)sizeof (*net_rsp));
2640 
2641 		switch (net_rsp->opcode) {
2642 
2643 		case OPCODE_OB_MAC_OFFLOAD_IOCB:
2644 		case OPCODE_OB_MAC_IOCB:
2645 			ql_process_mac_tx_intr(qlge, net_rsp);
2646 			break;
2647 
2648 		default:
2649 			cmn_err(CE_WARN,
2650 			    "%s Hit default case, not handled! "
2651 			    "dropping the packet,"
2652 			    " opcode = %x.",
2653 			    __func__, net_rsp->opcode);
2654 			break;
2655 		}
2656 		count++;
2657 		ql_update_cq(rx_ring);
2658 		prod = ql_read_sh_reg(qlge, rx_ring);
2659 	}
2660 	ql_write_cq_idx(rx_ring);
2661 
2662 	mutex_exit(&rx_ring->rx_lock);
2663 
2664 	net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2665 	tx_ring = &qlge->tx_ring[net_rsp->txq_idx];
2666 
2667 	mutex_enter(&tx_ring->tx_lock);
2668 
2669 	if (tx_ring->queue_stopped &&
2670 	    (tx_ring->tx_free_count > TX_RESUME_THRESHOLD)) {
2671 		/*
2672 		 * The queue got stopped because the tx_ring was full.
2673 		 * Wake it up, because it's now at least 25% empty.
2674 		 */
2675 		tx_ring->queue_stopped = 0;
2676 		resume_tx = B_TRUE;
2677 	}
2678 
2679 	mutex_exit(&tx_ring->tx_lock);
2680 	/* Don't hold the lock during OS callback */
2681 	if (resume_tx)
2682 		RESUME_TX(tx_ring);
2683 	return (count);
2684 }
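/*
 * The stop/resume hysteresis used here: ql_ring_tx() sets queue_stopped
 * once tx_free_count drops to TX_STOP_THRESHOLD or below, and the ring is
 * only resumed once tx_free_count climbs above TX_RESUME_THRESHOLD, so a
 * single reclaimed descriptor cannot bounce the queue between the two
 * states. RESUME_TX() is deliberately called after dropping tx_lock.
 */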
2685 
2686 /*
2687  * Reset the ASIC when an error happens
2688  */
2689 /* ARGSUSED */
2690 static uint_t
2691 ql_asic_reset_work(caddr_t arg1, caddr_t arg2)
2692 {
2693 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2694 	int status;
2695 
2696 	mutex_enter(&qlge->gen_mutex);
2697 	(void) ql_do_stop(qlge);
2698 	/*
2699 	 * Write default ethernet address to chip register Mac
2700 	 * Address slot 0 and Enable Primary Mac Function.
2701 	 */
2702 	mutex_enter(&qlge->hw_mutex);
2703 	(void) ql_unicst_set(qlge,
2704 	    (uint8_t *)qlge->unicst_addr[0].addr.ether_addr_octet, 0);
2705 	mutex_exit(&qlge->hw_mutex);
2706 	qlge->mac_flags = QL_MAC_INIT;
2707 	status = ql_do_start(qlge);
2708 	if (status != DDI_SUCCESS)
2709 		goto error;
2710 	qlge->mac_flags = QL_MAC_STARTED;
2711 	mutex_exit(&qlge->gen_mutex);
2712 	ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
2713 
2714 	return (DDI_INTR_CLAIMED);
2715 
2716 error:
2717 	mutex_exit(&qlge->gen_mutex);
2718 	cmn_err(CE_WARN,
2719 	    "qlge up/down cycle failed, closing device");
2720 	if (qlge->fm_enable) {
2721 		ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2722 		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
2723 		atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2724 	}
2725 	return (DDI_INTR_CLAIMED);
2726 }
2727 
2728 /*
2729  * Reset MPI
2730  */
2731 /* ARGSUSED */
2732 static uint_t
2733 ql_mpi_reset_work(caddr_t arg1, caddr_t arg2)
2734 {
2735 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2736 
2737 	(void) ql_reset_mpi_risc(qlge);
2738 	return (DDI_INTR_CLAIMED);
2739 }
2740 
2741 /*
2742  * Process MPI mailbox messages
2743  */
2744 /* ARGSUSED */
2745 static uint_t
2746 ql_mpi_event_work(caddr_t arg1, caddr_t arg2)
2747 {
2748 	qlge_t *qlge = (qlge_t *)((void *)arg1);
2749 
2750 	ql_do_mpi_intr(qlge);
2751 	return (DDI_INTR_CLAIMED);
2752 }
2753 
2754 /* Fire up the soft interrupt handler that resets the ASIC. */
2755 void
2756 ql_wake_asic_reset_soft_intr(qlge_t *qlge)
2757 {
2758 	(void) ddi_intr_trigger_softint(qlge->asic_reset_intr_hdl, NULL);
2759 }
2760 
2761 static void
2762 ql_wake_mpi_reset_soft_intr(qlge_t *qlge)
2763 {
2764 	(void) ddi_intr_trigger_softint(qlge->mpi_reset_intr_hdl, NULL);
2765 }
2766 
2767 static void
2768 ql_wake_mpi_event_soft_intr(qlge_t *qlge)
2769 {
2770 	(void) ddi_intr_trigger_softint(qlge->mpi_event_intr_hdl, NULL);
2771 }
2772 
2773 /*
2774  * This handles a fatal error, MPI activity, and the default
2775  * rx_ring in an MSI-X multiple interrupt vector environment.
2776  * In an MSI/Legacy environment it also processes the rest of
2777  * the rx_rings.
2778  */
2779 /* ARGSUSED */
2780 static uint_t
2781 ql_isr(caddr_t arg1, caddr_t arg2)
2782 {
2783 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2784 	struct rx_ring *ob_ring;
2785 	qlge_t *qlge = rx_ring->qlge;
2786 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
2787 	uint32_t var, prod;
2788 	int i;
2789 	int work_done = 0;
2790 
2791 	mblk_t *mp;
2792 
2793 	_NOTE(ARGUNUSED(arg2));
2794 
2795 	++qlge->rx_interrupts[rx_ring->cq_id];
2796 
2797 	if (ql_atomic_read_32(&qlge->intr_ctx[0].irq_cnt)) {
2798 		ql_write_reg(qlge, REG_RSVD7, 0xfeed0002);
2799 		var = ql_read_reg(qlge, REG_ERROR_STATUS);
2800 		var = ql_read_reg(qlge, REG_STATUS);
2801 		var = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
2802 		return (DDI_INTR_CLAIMED);
2803 	}
2804 
2805 	ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2806 
2807 	/*
2808 	 * Process send completions on the first stride tx ring, if available
2809 	 */
2810 	if (qlge->isr_stride) {
2811 		ob_ring = &qlge->rx_ring[qlge->isr_stride];
2812 		if (ql_read_sh_reg(qlge, ob_ring) !=
2813 		    ob_ring->cnsmr_idx) {
2814 			(void) ql_clean_outbound_rx_ring(ob_ring);
2815 		}
2816 	}
2817 	/*
2818 	 * Check the default queue and wake handler if active.
2819 	 */
2820 	rx_ring = &qlge->rx_ring[0];
2821 	prod = ql_read_sh_reg(qlge, rx_ring);
2822 	QL_PRINT(DBG_INTR, ("rx-ring[0] prod index 0x%x, consumer 0x%x ",
2823 	    prod, rx_ring->cnsmr_idx));
2824 	/* check if interrupt is due to incoming packet */
2825 	if (prod != rx_ring->cnsmr_idx) {
2826 		QL_PRINT(DBG_INTR, ("Waking handler for rx_ring[0].\n"));
2827 		ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2828 		mutex_enter(&rx_ring->rx_lock);
2829 		mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2830 		mutex_exit(&rx_ring->rx_lock);
2831 
2832 		if (mp != NULL)
2833 			RX_UPSTREAM(rx_ring, mp);
2834 		work_done++;
2835 	} else {
2836 		/*
2837 		 * If the interrupt is not due to an incoming packet, read the
2838 		 * status register to check for an error or a mailbox interrupt.
2839 		 */
2840 		var = ql_read_reg(qlge, REG_STATUS);
2841 		if ((var & STATUS_FE) != 0) {
2842 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
2843 			if (qlge->fm_enable) {
2844 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
2845 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
2846 				ddi_fm_service_impact(qlge->dip,
2847 				    DDI_SERVICE_LOST);
2848 			}
2849 			cmn_err(CE_WARN, "Got fatal error, STS = %x.", var);
2850 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
2851 			cmn_err(CE_WARN,
2852 			    "Resetting chip. Error Status Register = 0x%x",
2853 			    var);
2854 			ql_wake_asic_reset_soft_intr(qlge);
2855 			return (DDI_INTR_CLAIMED);
2856 		}
2857 
2858 		/*
2859 		 * Check MPI processor activity.
2860 		 */
2861 		if ((var & STATUS_PI) != 0) {
2862 			/*
2863 			 * We've got an async event or mailbox completion.
2864 			 * Handle it and clear the source of the interrupt.
2865 			 */
2866 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
2867 
2868 			QL_PRINT(DBG_INTR, ("Got MPI processor interrupt.\n"));
2869 			ql_disable_completion_interrupt(qlge, intr_ctx->intr);
2870 			ql_wake_mpi_event_soft_intr(qlge);
2871 			work_done++;
2872 		}
2873 	}
2874 
2875 
2876 	if (qlge->intr_type != DDI_INTR_TYPE_MSIX) {
2877 		/*
2878 		 * Start the DPC for each active queue.
2879 		 */
2880 		for (i = 1; i < qlge->rx_ring_count; i++) {
2881 			rx_ring = &qlge->rx_ring[i];
2882 
2883 			if (ql_read_sh_reg(qlge, rx_ring) !=
2884 			    rx_ring->cnsmr_idx) {
2885 				QL_PRINT(DBG_INTR,
2886 				    ("Waking handler for rx_ring[%d].\n", i));
2887 
2888 				ql_disable_completion_interrupt(qlge,
2889 				    rx_ring->irq);
2890 				if (rx_ring->type == TX_Q) {
2891 					(void) ql_clean_outbound_rx_ring(
2892 					    rx_ring);
2893 					ql_enable_completion_interrupt(
2894 					    rx_ring->qlge, rx_ring->irq);
2895 				} else {
2896 					mutex_enter(&rx_ring->rx_lock);
2897 					mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2898 					mutex_exit(&rx_ring->rx_lock);
2899 					if (mp != NULL)
2900 						RX_UPSTREAM(rx_ring, mp);
2901 #ifdef QLGE_LOAD_UNLOAD
2902 					if (rx_ring->mac_flags ==
2903 					    QL_MAC_STOPPED)
2904 						cmn_err(CE_NOTE,
2905 						    "%s rx_indicate(%d) %d\n",
2906 						    __func__, i,
2907 						    rx_ring->rx_indicate);
2908 #endif
2909 				}
2910 				work_done++;
2911 			}
2912 		}
2913 	}
2914 
2915 	ql_enable_completion_interrupt(qlge, intr_ctx->intr);
2916 
2917 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
2918 }
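/*
 * Interrupt topology, as reflected above: with MSI-X each rx/tx completion
 * ring gets its own vector (ql_msix_rx_isr / ql_msix_tx_isr) and ql_isr only
 * covers vector 0 (fatal errors, MPI events and the default ring); with MSI
 * or legacy INTx, ql_isr additionally sweeps every other active ring in the
 * loop above.
 */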
2919 
2920 /*
2921  * MSI-X Multiple Vector Interrupt Handler for outbound (TX) completions.
2922  */
2923 /* ARGSUSED */
2924 static uint_t
2925 ql_msix_tx_isr(caddr_t arg1, caddr_t arg2)
2926 {
2927 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2928 	qlge_t *qlge = rx_ring->qlge;
2929 	_NOTE(ARGUNUSED(arg2));
2930 
2931 	++qlge->rx_interrupts[rx_ring->cq_id];
2932 	(void) ql_clean_outbound_rx_ring(rx_ring);
2933 	ql_enable_completion_interrupt(rx_ring->qlge, rx_ring->irq);
2934 
2935 	return (DDI_INTR_CLAIMED);
2936 }
2937 
2938 /*
2939  * MSI-X Multiple Vector Interrupt Handler
2940  */
2941 /* ARGSUSED */
2942 static uint_t
2943 ql_msix_isr(caddr_t arg1, caddr_t arg2)
2944 {
2945 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
2946 	struct rx_ring *ob_ring;
2947 	qlge_t *qlge = rx_ring->qlge;
2948 	mblk_t *mp;
2949 	_NOTE(ARGUNUSED(arg2));
2950 
2951 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
2952 
2953 	ql_disable_completion_interrupt(qlge, rx_ring->irq);
2954 
2955 	/*
2956 	 * Process send completions on the stride tx ring, if available
2957 	 */
2958 	if (qlge->isr_stride) {
2959 		ob_ring = rx_ring + qlge->isr_stride;
2960 		if (ql_read_sh_reg(qlge, ob_ring) !=
2961 		    ob_ring->cnsmr_idx) {
2962 			++qlge->rx_interrupts[ob_ring->cq_id];
2963 			(void) ql_clean_outbound_rx_ring(ob_ring);
2964 		}
2965 	}
2966 
2967 	++qlge->rx_interrupts[rx_ring->cq_id];
2968 
2969 	mutex_enter(&rx_ring->rx_lock);
2970 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
2971 	mutex_exit(&rx_ring->rx_lock);
2972 
2973 	if (mp != NULL)
2974 		RX_UPSTREAM(rx_ring, mp);
2975 
2976 	return (DDI_INTR_CLAIMED);
2977 }
2978 
2979 /*
2980  * Poll n_bytes of chained incoming packets
2981  */
2982 mblk_t *
2983 ql_ring_rx_poll(void *arg, int n_bytes)
2984 {
2985 	struct rx_ring *rx_ring = (struct rx_ring *)arg;
2986 	qlge_t *qlge = rx_ring->qlge;
2987 	mblk_t *mp = NULL;
2988 	uint32_t var;
2989 
2990 	ASSERT(n_bytes >= 0);
2991 	QL_PRINT(DBG_GLD, ("%s for ring(%d) to read max %d bytes\n",
2992 	    __func__, rx_ring->cq_id, n_bytes));
2993 
2994 	++qlge->rx_polls[rx_ring->cq_id];
2995 
2996 	if (n_bytes == 0)
2997 		return (mp);
2998 	mutex_enter(&rx_ring->rx_lock);
2999 	mp = ql_ring_rx(rx_ring, n_bytes);
3000 	mutex_exit(&rx_ring->rx_lock);
3001 
3002 	if ((rx_ring->cq_id == 0) && (mp == NULL)) {
3003 		var = ql_read_reg(qlge, REG_STATUS);
3004 		/*
3005 		 * Check for fatal error.
3006 		 */
3007 		if ((var & STATUS_FE) != 0) {
3008 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0003);
3009 			var = ql_read_reg(qlge, REG_ERROR_STATUS);
3010 			cmn_err(CE_WARN, "Got fatal error %x.", var);
3011 			ql_wake_asic_reset_soft_intr(qlge);
3012 			if (qlge->fm_enable) {
3013 				atomic_or_32(&qlge->flags, ADAPTER_ERROR);
3014 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
3015 				ddi_fm_service_impact(qlge->dip,
3016 				    DDI_SERVICE_LOST);
3017 			}
3018 		}
3019 		/*
3020 		 * Check MPI processor activity.
3021 		 */
3022 		if ((var & STATUS_PI) != 0) {
3023 			/*
3024 			 * We've got an async event or mailbox completion.
3025 			 * Handle it and clear the source of the interrupt.
3026 			 */
3027 			ql_write_reg(qlge, REG_RSVD7, 0xfeed0004);
3028 			ql_do_mpi_intr(qlge);
3029 		}
3030 	}
3031 
3032 	return (mp);
3033 }
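/*
 * ql_ring_rx_poll() is the per-ring poll entry point handed to the mac
 * layer. A caller in polling mode would, roughly (byte budget shown is
 * illustrative only):
 *
 *	mp = ql_ring_rx_poll((void *)rx_ring, 64 * 1024);
 *
 * and pass any returned chain upstream itself; unlike the interrupt path,
 * no RX_UPSTREAM() is done here.
 */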
3034 
3035 /*
3036  * MSI-X Multiple Vector Interrupt Handler for inbound (RX) completions.
3037  */
3038 /* ARGSUSED */
3039 static uint_t
3040 ql_msix_rx_isr(caddr_t arg1, caddr_t arg2)
3041 {
3042 	struct rx_ring *rx_ring = (struct rx_ring *)((void *)arg1);
3043 	qlge_t *qlge = rx_ring->qlge;
3044 	mblk_t *mp;
3045 	_NOTE(ARGUNUSED(arg2));
3046 
3047 	QL_PRINT(DBG_INTR, ("%s for ring %d\n", __func__, rx_ring->cq_id));
3048 
3049 	++qlge->rx_interrupts[rx_ring->cq_id];
3050 
3051 	mutex_enter(&rx_ring->rx_lock);
3052 	mp = ql_ring_rx(rx_ring, QLGE_POLL_ALL);
3053 	mutex_exit(&rx_ring->rx_lock);
3054 
3055 	if (mp != NULL)
3056 		RX_UPSTREAM(rx_ring, mp);
3057 
3058 	return (DDI_INTR_CLAIMED);
3059 }
3060 
3061 
3062 /*
3063  * Allocate a DMA buffer for ioctl service
3064  */
3067 static int
3068 ql_alloc_ioctl_dma_buf(qlge_t *qlge)
3069 {
3070 	uint64_t phy_addr;
3071 	uint64_t alloc_size;
3072 	ddi_dma_cookie_t dma_cookie;
3073 
3074 	alloc_size = qlge->ioctl_buf_dma_attr.mem_len =
3075 	    max(WCS_MPI_CODE_RAM_LENGTH, MEMC_MPI_RAM_LENGTH);
3076 	if (ql_alloc_phys(qlge->dip, &qlge->ioctl_buf_dma_attr.dma_handle,
3077 	    &ql_buf_acc_attr,
3078 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3079 	    &qlge->ioctl_buf_dma_attr.acc_handle,
3080 	    (size_t)alloc_size,  /* mem size */
3081 	    (size_t)0,  /* alignment */
3082 	    (caddr_t *)&qlge->ioctl_buf_dma_attr.vaddr,
3083 	    &dma_cookie) != 0) {
3084 		cmn_err(CE_WARN, "%s(%d): ioctl DMA allocation failed.",
3085 		    __func__, qlge->instance);
3086 		return (DDI_FAILURE);
3087 	}
3088 
3089 	phy_addr = dma_cookie.dmac_laddress;
3090 
3091 	if (qlge->ioctl_buf_dma_attr.vaddr == NULL) {
3092 		cmn_err(CE_WARN, "%s(%d): failed.", __func__, qlge->instance);
3093 		return (DDI_FAILURE);
3094 	}
3095 
3096 	qlge->ioctl_buf_dma_attr.dma_addr = phy_addr;
3097 
3098 	QL_PRINT(DBG_MBX, ("%s: ioctl_dma_buf_virt_addr = 0x%lx, "
3099 	    "phy_addr = 0x%lx\n",
3100 	    __func__, qlge->ioctl_buf_dma_attr.vaddr, phy_addr));
3101 
3102 	return (DDI_SUCCESS);
3103 }
3104 
3105 
3106 /*
3107  * Function to free physical memory.
3108  */
3109 static void
3110 ql_free_phys(ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *acc_handle)
3111 {
3112 	if (dma_handle != NULL) {
3113 		(void) ddi_dma_unbind_handle(*dma_handle);
3114 		if (acc_handle != NULL)
3115 			ddi_dma_mem_free(acc_handle);
3116 		ddi_dma_free_handle(dma_handle);
3117 	}
3118 }
3119 
3120 /*
3121  * Function to free ioctl dma buffer.
3122  */
3123 static void
3124 ql_free_ioctl_dma_buf(qlge_t *qlge)
3125 {
3126 	if (qlge->ioctl_buf_dma_attr.dma_handle != NULL) {
3127 		ql_free_phys(&qlge->ioctl_buf_dma_attr.dma_handle,
3128 		    &qlge->ioctl_buf_dma_attr.acc_handle);
3129 
3130 		qlge->ioctl_buf_dma_attr.vaddr = NULL;
3131 		qlge->ioctl_buf_dma_attr.dma_handle = NULL;
3132 	}
3133 }
3134 
3135 /*
3136  * Free shadow register space used for request and completion queues
3137  */
3138 static void
3139 ql_free_shadow_space(qlge_t *qlge)
3140 {
3141 	if (qlge->host_copy_shadow_dma_attr.dma_handle != NULL) {
3142 		ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3143 		    &qlge->host_copy_shadow_dma_attr.acc_handle);
3144 		bzero(&qlge->host_copy_shadow_dma_attr,
3145 		    sizeof (qlge->host_copy_shadow_dma_attr));
3146 	}
3147 
3148 	if (qlge->buf_q_ptr_base_addr_dma_attr.dma_handle != NULL) {
3149 		ql_free_phys(&qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3150 		    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle);
3151 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3152 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3153 	}
3154 }
3155 
3156 /*
3157  * Allocate shadow register space for request and completion queues
3158  */
3159 static int
3160 ql_alloc_shadow_space(qlge_t *qlge)
3161 {
3162 	ddi_dma_cookie_t dma_cookie;
3163 
3164 	if (ql_alloc_phys(qlge->dip,
3165 	    &qlge->host_copy_shadow_dma_attr.dma_handle,
3166 	    &ql_dev_acc_attr,
3167 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3168 	    &qlge->host_copy_shadow_dma_attr.acc_handle,
3169 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3170 	    (size_t)4, /* 4 bytes alignment */
3171 	    (caddr_t *)&qlge->host_copy_shadow_dma_attr.vaddr,
3172 	    &dma_cookie) != 0) {
3173 		bzero(&qlge->host_copy_shadow_dma_attr,
3174 		    sizeof (qlge->host_copy_shadow_dma_attr));
3175 
3176 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory for "
3177 		    "response shadow registers", __func__, qlge->instance);
3178 		return (DDI_FAILURE);
3179 	}
3180 
3181 	qlge->host_copy_shadow_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3182 
3183 	if (ql_alloc_phys(qlge->dip,
3184 	    &qlge->buf_q_ptr_base_addr_dma_attr.dma_handle,
3185 	    &ql_desc_acc_attr,
3186 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3187 	    &qlge->buf_q_ptr_base_addr_dma_attr.acc_handle,
3188 	    (size_t)VM_PAGE_SIZE,  /* mem size */
3189 	    (size_t)4, /* 4 bytes alignment */
3190 	    (caddr_t *)&qlge->buf_q_ptr_base_addr_dma_attr.vaddr,
3191 	    &dma_cookie) != 0) {
3192 		bzero(&qlge->buf_q_ptr_base_addr_dma_attr,
3193 		    sizeof (qlge->buf_q_ptr_base_addr_dma_attr));
3194 
3195 		cmn_err(CE_WARN, "%s(%d): Unable to allocate DMA memory "
3196 		    "for request shadow registers",
3197 		    __func__, qlge->instance);
3198 		goto err_wqp_sh_area;
3199 	}
3200 	qlge->buf_q_ptr_base_addr_dma_attr.dma_addr = dma_cookie.dmac_laddress;
3201 
3202 	return (DDI_SUCCESS);
3203 
3204 err_wqp_sh_area:
3205 	ql_free_phys(&qlge->host_copy_shadow_dma_attr.dma_handle,
3206 	    &qlge->host_copy_shadow_dma_attr.acc_handle);
3207 	bzero(&qlge->host_copy_shadow_dma_attr,
3208 	    sizeof (qlge->host_copy_shadow_dma_attr));
3209 
3210 	return (DDI_FAILURE);
3211 }
3212 
3213 /*
3214  * Initialize a tx ring
3215  */
3216 static void
3217 ql_init_tx_ring(struct tx_ring *tx_ring)
3218 {
3219 	int i;
3220 	struct ob_mac_iocb_req *mac_iocb_ptr = tx_ring->wq_dma.vaddr;
3221 	struct tx_ring_desc *tx_ring_desc = tx_ring->wq_desc;
3222 
3223 	for (i = 0; i < tx_ring->wq_len; i++) {
3224 		tx_ring_desc->index = i;
3225 		tx_ring_desc->queue_entry = mac_iocb_ptr;
3226 		mac_iocb_ptr++;
3227 		tx_ring_desc++;
3228 	}
3229 	tx_ring->tx_free_count = tx_ring->wq_len;
3230 	tx_ring->queue_stopped = 0;
3231 }
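/*
 * After ql_init_tx_ring() the software and hardware descriptors are paired
 * one-to-one: wq_desc[i].queue_entry always points at slot i of the
 * ob_mac_iocb_req array in wq_dma.vaddr, so a completion carrying tid i can
 * be mapped straight back to its tx_ring_desc in ql_process_mac_tx_intr().
 */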
3232 
3233 /*
3234  * Free the resources of one tx ring
3235  */
3236 static void
3237 ql_free_tx_resources(struct tx_ring *tx_ring)
3238 {
3239 	struct tx_ring_desc *tx_ring_desc;
3240 	int i, j;
3241 
3242 	ql_free_phys(&tx_ring->wq_dma.dma_handle, &tx_ring->wq_dma.acc_handle);
3243 	bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3244 
3245 	if (tx_ring->wq_desc != NULL) {
3246 		tx_ring_desc = tx_ring->wq_desc;
3247 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3248 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3249 				if (tx_ring_desc->tx_dma_handle[j]) {
3250 					/*
3251 					 * The unbinding happens at tx
3252 					 * completion time; here we only
3253 					 * free the handles
3254 					 */
3255 					ddi_dma_free_handle(
3256 					    &(tx_ring_desc->tx_dma_handle[j]));
3257 					tx_ring_desc->tx_dma_handle[j] = NULL;
3258 				}
3259 			}
3260 			if (tx_ring_desc->oal != NULL) {
3261 				tx_ring_desc->oal_dma_addr = 0;
3262 				tx_ring_desc->oal = NULL;
3263 				tx_ring_desc->copy_buffer = NULL;
3264 				tx_ring_desc->copy_buffer_dma_addr = 0;
3265 
3266 				ql_free_phys(&tx_ring_desc->oal_dma.dma_handle,
3267 				    &tx_ring_desc->oal_dma.acc_handle);
3268 			}
3269 		}
3270 		kmem_free(tx_ring->wq_desc,
3271 		    tx_ring->wq_len * sizeof (struct tx_ring_desc));
3272 		tx_ring->wq_desc = NULL;
3273 	}
3274 	/* free the wqicb struct */
3275 	if (tx_ring->wqicb_dma.dma_handle) {
3276 		ql_free_phys(&tx_ring->wqicb_dma.dma_handle,
3277 		    &tx_ring->wqicb_dma.acc_handle);
3278 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3279 	}
3280 }
3281 
3282 /*
3283  * Allocate work (request) queue memory and transmit
3284  * descriptors for this transmit ring
3285  */
3286 static int
3287 ql_alloc_tx_resources(qlge_t *qlge, struct tx_ring *tx_ring)
3288 {
3289 	ddi_dma_cookie_t dma_cookie;
3290 	struct tx_ring_desc *tx_ring_desc;
3291 	int i, j;
3292 	uint32_t length;
3293 
3294 	/* allocate dma buffers for obiocbs */
3295 	if (ql_alloc_phys(qlge->dip, &tx_ring->wq_dma.dma_handle,
3296 	    &ql_desc_acc_attr,
3297 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3298 	    &tx_ring->wq_dma.acc_handle,
3299 	    (size_t)tx_ring->wq_size,	/* mem size */
3300 	    (size_t)128, /* alignment:128 bytes boundary */
3301 	    (caddr_t *)&tx_ring->wq_dma.vaddr,
3302 	    &dma_cookie) != 0) {
3303 		bzero(&tx_ring->wq_dma, sizeof (tx_ring->wq_dma));
3304 		cmn_err(CE_WARN, "%s(%d): reqQ allocation failed.",
3305 		    __func__, qlge->instance);
3306 		return (DDI_FAILURE);
3307 	}
3308 	tx_ring->wq_dma.dma_addr = dma_cookie.dmac_laddress;
3309 
3310 	tx_ring->wq_desc =
3311 	    kmem_zalloc(tx_ring->wq_len * sizeof (struct tx_ring_desc),
3312 	    KM_NOSLEEP);
3313 	if (tx_ring->wq_desc == NULL) {
3314 		goto err;
3315 	} else {
3316 		tx_ring_desc = tx_ring->wq_desc;
3317 		/*
3318 		 * Allocate a large enough structure to hold the following
3319 		 * 1. oal buffer of MAX_SG_ELEMENTS * sizeof (oal_entry) bytes
3320 		 * 2. copy buffer of QL_MAX_COPY_LENGTH bytes
3321 		 */
3322 		length = (sizeof (struct oal_entry) * MAX_SG_ELEMENTS)
3323 		    + QL_MAX_COPY_LENGTH;
3324 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
3325 
3326 			if (ql_alloc_phys(qlge->dip,
3327 			    &tx_ring_desc->oal_dma.dma_handle,
3328 			    &ql_desc_acc_attr,
3329 			    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3330 			    &tx_ring_desc->oal_dma.acc_handle,
3331 			    (size_t)length,	/* mem size */
3332 			    (size_t)0, /* default alignment:8 bytes boundary */
3333 			    (caddr_t *)&tx_ring_desc->oal_dma.vaddr,
3334 			    &dma_cookie) != 0) {
3335 				bzero(&tx_ring_desc->oal_dma,
3336 				    sizeof (tx_ring_desc->oal_dma));
3337 				cmn_err(CE_WARN, "%s(%d): reqQ tx buf &"
3338 				    "oal alloc failed.",
3339 				    __func__, qlge->instance);
3340 				return (DDI_FAILURE);
3341 			}
3342 
3343 			tx_ring_desc->oal = tx_ring_desc->oal_dma.vaddr;
3344 			tx_ring_desc->oal_dma_addr = dma_cookie.dmac_laddress;
3345 			tx_ring_desc->copy_buffer =
3346 			    (caddr_t)((uint8_t *)tx_ring_desc->oal
3347 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3348 			tx_ring_desc->copy_buffer_dma_addr =
3349 			    (tx_ring_desc->oal_dma_addr
3350 			    + (sizeof (struct oal_entry) * MAX_SG_ELEMENTS));
3351 
3352 			/* Allocate dma handles for transmit buffers */
3353 			for (j = 0; j < QL_MAX_TX_DMA_HANDLES; j++) {
3354 				if (ddi_dma_alloc_handle(qlge->dip,
3355 				    &tx_mapping_dma_attr,
3356 				    DDI_DMA_DONTWAIT,
3357 				    0, &tx_ring_desc->tx_dma_handle[j])
3358 				    != DDI_SUCCESS) {
3359 					cmn_err(CE_WARN,
3360 					    "!%s: ddi_dma_alloc_handle: "
3361 					    "tx_dma_handle "
3362 					    "alloc failed", __func__);
3363 					goto err;
3364 				}
3365 			}
3366 		}
3367 	}
3368 	/* alloc a wqicb control block to load this tx ring to hw */
3369 	if (ql_alloc_phys(qlge->dip, &tx_ring->wqicb_dma.dma_handle,
3370 	    &ql_desc_acc_attr,
3371 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3372 	    &tx_ring->wqicb_dma.acc_handle,
3373 	    (size_t)sizeof (struct wqicb_t),	/* mem size */
3374 	    (size_t)0, /* alignment:128 bytes boundary */
3375 	    (caddr_t *)&tx_ring->wqicb_dma.vaddr,
3376 	    &dma_cookie) != 0) {
3377 		bzero(&tx_ring->wqicb_dma, sizeof (tx_ring->wqicb_dma));
3378 		cmn_err(CE_WARN, "%s(%d): wqicb allocation failed.",
3379 		    __func__, qlge->instance);
3380 		return (DDI_FAILURE);
3381 	}
3382 	tx_ring->wqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3383 
3384 	return (DDI_SUCCESS);
3385 
3386 err:
3387 	ql_free_tx_resources(tx_ring);
3388 	return (DDI_FAILURE);
3389 }
3390 
3391 /*
3392  * Free one rx ring resources
3393  * Free the resources of one rx ring
3394 static void
3395 ql_free_rx_resources(struct rx_ring *rx_ring)
3396 {
3397 	/* Free the small buffer queue. */
3398 	if (rx_ring->sbq_dma.dma_handle) {
3399 		ql_free_phys(&rx_ring->sbq_dma.dma_handle,
3400 		    &rx_ring->sbq_dma.acc_handle);
3401 		bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3402 	}
3403 
3404 	/* Free the small buffer queue control blocks. */
3405 	kmem_free(rx_ring->sbq_desc, rx_ring->sbq_len *
3406 	    sizeof (struct bq_desc));
3407 	rx_ring->sbq_desc = NULL;
3408 
3409 	/* Free the large buffer queue. */
3410 	if (rx_ring->lbq_dma.dma_handle) {
3411 		ql_free_phys(&rx_ring->lbq_dma.dma_handle,
3412 		    &rx_ring->lbq_dma.acc_handle);
3413 		bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3414 	}
3415 
3416 	/* Free the large buffer queue control blocks. */
3417 	kmem_free(rx_ring->lbq_desc, rx_ring->lbq_len *
3418 	    sizeof (struct bq_desc));
3419 	rx_ring->lbq_desc = NULL;
3420 
3421 	/* Free cqicb struct */
3422 	if (rx_ring->cqicb_dma.dma_handle) {
3423 		ql_free_phys(&rx_ring->cqicb_dma.dma_handle,
3424 		    &rx_ring->cqicb_dma.acc_handle);
3425 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3426 	}
3427 	/* Free the rx queue. */
3428 	if (rx_ring->cq_dma.dma_handle) {
3429 		ql_free_phys(&rx_ring->cq_dma.dma_handle,
3430 		    &rx_ring->cq_dma.acc_handle);
3431 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3432 	}
3433 }
3434 
3435 /*
3436  * Allocate queues and buffers for this completion queue based
3437  * on the values in the parameter structure.
3438  */
3439 static int
3440 ql_alloc_rx_resources(qlge_t *qlge, struct rx_ring *rx_ring)
3441 {
3442 	ddi_dma_cookie_t dma_cookie;
3443 
3444 	if (ql_alloc_phys(qlge->dip, &rx_ring->cq_dma.dma_handle,
3445 	    &ql_desc_acc_attr,
3446 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3447 	    &rx_ring->cq_dma.acc_handle,
3448 	    (size_t)rx_ring->cq_size,  /* mem size */
3449 	    (size_t)128, /* alignment:128 bytes boundary */
3450 	    (caddr_t *)&rx_ring->cq_dma.vaddr,
3451 	    &dma_cookie) != 0)	{
3452 		bzero(&rx_ring->cq_dma, sizeof (rx_ring->cq_dma));
3453 		cmn_err(CE_WARN, "%s(%d): rspQ allocation failed.",
3454 		    __func__, qlge->instance);
3455 		return (DDI_FAILURE);
3456 	}
3457 	rx_ring->cq_dma.dma_addr = dma_cookie.dmac_laddress;
3458 
3459 	if (rx_ring->sbq_len != 0) {
3460 		/*
3461 		 * Allocate small buffer queue.
3462 		 */
3463 		if (ql_alloc_phys(qlge->dip, &rx_ring->sbq_dma.dma_handle,
3464 		    &ql_desc_acc_attr,
3465 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3466 		    &rx_ring->sbq_dma.acc_handle,
3467 		    (size_t)rx_ring->sbq_size,  /* mem size */
3468 		    (size_t)128, /* alignment:128 bytes boundary */
3469 		    (caddr_t *)&rx_ring->sbq_dma.vaddr,
3470 		    &dma_cookie) != 0) {
3471 			bzero(&rx_ring->sbq_dma, sizeof (rx_ring->sbq_dma));
3472 			cmn_err(CE_WARN,
3473 			    "%s(%d): small buffer queue allocation failed.",
3474 			    __func__, qlge->instance);
3475 			goto err_mem;
3476 		}
3477 		rx_ring->sbq_dma.dma_addr = dma_cookie.dmac_laddress;
3478 
3479 		/*
3480 		 * Allocate small buffer queue control blocks.
3481 		 */
3482 		rx_ring->sbq_desc =
3483 		    kmem_zalloc(rx_ring->sbq_len * sizeof (struct bq_desc),
3484 		    KM_NOSLEEP);
3485 		if (rx_ring->sbq_desc == NULL) {
3486 			cmn_err(CE_WARN,
3487 			    "sbq control block allocation failed.");
3488 			goto err_mem;
3489 		}
3490 
3491 		ql_init_sbq_ring(rx_ring);
3492 	}
3493 
3494 	if (rx_ring->lbq_len != 0) {
3495 		/*
3496 		 * Allocate large buffer queue.
3497 		 */
3498 		if (ql_alloc_phys(qlge->dip, &rx_ring->lbq_dma.dma_handle,
3499 		    &ql_desc_acc_attr,
3500 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3501 		    &rx_ring->lbq_dma.acc_handle,
3502 		    (size_t)rx_ring->lbq_size,  /* mem size */
3503 		    (size_t)128, /* alignment:128 bytes boundary */
3504 		    (caddr_t *)&rx_ring->lbq_dma.vaddr,
3505 		    &dma_cookie) != 0) {
3506 			bzero(&rx_ring->lbq_dma, sizeof (rx_ring->lbq_dma));
3507 			cmn_err(CE_WARN, "%s(%d): lbq allocation failed.",
3508 			    __func__, qlge->instance);
3509 			goto err_mem;
3510 		}
3511 		rx_ring->lbq_dma.dma_addr = dma_cookie.dmac_laddress;
3512 
3513 		/*
3514 		 * Allocate large buffer queue control blocks.
3515 		 */
3516 		rx_ring->lbq_desc =
3517 		    kmem_zalloc(rx_ring->lbq_len * sizeof (struct bq_desc),
3518 		    KM_NOSLEEP);
3519 		if (rx_ring->lbq_desc == NULL) {
3520 			cmn_err(CE_WARN,
3521 			    "Large buffer queue control block allocation "
3522 			    "failed.");
3523 			goto err_mem;
3524 		}
3525 		ql_init_lbq_ring(rx_ring);
3526 	}
3527 
3528 	if (ql_alloc_phys(qlge->dip, &rx_ring->cqicb_dma.dma_handle,
3529 	    &ql_desc_acc_attr,
3530 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3531 	    &rx_ring->cqicb_dma.acc_handle,
3532 	    (size_t)sizeof (struct cqicb_t),  /* mem size */
3533 	    (size_t)0, /* alignment:128 bytes boundary */
3534 	    (caddr_t *)&rx_ring->cqicb_dma.vaddr,
3535 	    &dma_cookie) != 0) {
3536 		bzero(&rx_ring->cqicb_dma, sizeof (rx_ring->cqicb_dma));
3537 		cmn_err(CE_WARN, "%s(%d): cqicb allocation failed.",
3538 		    __func__, qlge->instance);
3539 		return (DDI_FAILURE);
3540 	}
3541 	rx_ring->cqicb_dma.dma_addr = dma_cookie.dmac_laddress;
3542 
3543 	return (DDI_SUCCESS);
3544 
3545 err_mem:
3546 	ql_free_rx_resources(rx_ring);
3547 	return (DDI_FAILURE);
3548 }
3549 
3550 /*
3551  * Free tx/rx queue memory resources
3552  */
3553 static void
3554 ql_free_mem_resources(qlge_t *qlge)
3555 {
3556 	int i;
3557 
3558 	if (qlge->ricb_dma.dma_handle) {
3559 		/* free the ricb struct */
3560 		ql_free_phys(&qlge->ricb_dma.dma_handle,
3561 		    &qlge->ricb_dma.acc_handle);
3562 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3563 	}
3564 
3565 	ql_free_rx_buffers(qlge);
3566 
3567 	ql_free_ioctl_dma_buf(qlge);
3568 
3569 	for (i = 0; i < qlge->tx_ring_count; i++)
3570 		ql_free_tx_resources(&qlge->tx_ring[i]);
3571 
3572 	for (i = 0; i < qlge->rx_ring_count; i++)
3573 		ql_free_rx_resources(&qlge->rx_ring[i]);
3574 
3575 	ql_free_shadow_space(qlge);
3576 }
3577 
3578 /*
3579  * Allocate buffer queues, large buffers, small buffers, etc.
3580  *
3581  * This API is called from the gld_attach member function. It is called
3582  * only once; later resets and reboots should not re-allocate all rings
3583  * and buffers.
3584  */
3585 static int
3586 ql_alloc_mem_resources(qlge_t *qlge)
3587 {
3588 	int i;
3589 	ddi_dma_cookie_t dma_cookie;
3590 
3591 	/* Allocate space for our shadow registers */
3592 	if (ql_alloc_shadow_space(qlge))
3593 		return (DDI_FAILURE);
3594 
3595 	for (i = 0; i < qlge->rx_ring_count; i++) {
3596 		if (ql_alloc_rx_resources(qlge, &qlge->rx_ring[i]) != 0) {
3597 			cmn_err(CE_WARN, "RX resource allocation failed.");
3598 			goto err_mem;
3599 		}
3600 	}
3601 	/* Allocate tx queue resources */
3602 	for (i = 0; i < qlge->tx_ring_count; i++) {
3603 		if (ql_alloc_tx_resources(qlge, &qlge->tx_ring[i]) != 0) {
3604 			cmn_err(CE_WARN, "Tx resource allocation failed.");
3605 			goto err_mem;
3606 		}
3607 	}
3608 
3609 	if (ql_alloc_ioctl_dma_buf(qlge) != DDI_SUCCESS) {
3610 		goto err_mem;
3611 	}
3612 
3613 	if (ql_alloc_rx_buffers(qlge) != DDI_SUCCESS) {
3614 		cmn_err(CE_WARN, "?%s(%d): ql_alloc_rx_buffers failed",
3615 		    __func__, qlge->instance);
3616 		goto err_mem;
3617 	}
3618 
3619 	qlge->sequence |= INIT_ALLOC_RX_BUF;
3620 
3621 	if (ql_alloc_phys(qlge->dip, &qlge->ricb_dma.dma_handle,
3622 	    &ql_desc_acc_attr,
3623 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3624 	    &qlge->ricb_dma.acc_handle,
3625 	    (size_t)sizeof (struct ricb),  /* mem size */
3626 	    (size_t)0, /* alignment:128 bytes boundary */
3627 	    (caddr_t *)&qlge->ricb_dma.vaddr,
3628 	    &dma_cookie) != 0) {
3629 		bzero(&qlge->ricb_dma, sizeof (qlge->ricb_dma));
3630 		cmn_err(CE_WARN, "%s(%d): ricb allocation failed.",
3631 		    __func__, qlge->instance);
3632 		return (DDI_FAILURE);
3633 	}
3634 	qlge->ricb_dma.dma_addr = dma_cookie.dmac_laddress;
3635 
3636 	return (DDI_SUCCESS);
3637 
3638 err_mem:
3639 	return (DDI_FAILURE);
3640 }
3641 
3642 
3643 /*
3644  * Function used to allocate physical memory and zero it.
3645  */
3646 
3647 static int
3648 ql_alloc_phys_rbuf(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3649     ddi_device_acc_attr_t *device_acc_attr,
3650     uint_t dma_flags,
3651     ddi_acc_handle_t *acc_handle,
3652     size_t size,
3653     size_t alignment,
3654     caddr_t *vaddr,
3655     ddi_dma_cookie_t *dma_cookie)
3656 {
3657 	size_t rlen;
3658 	uint_t cnt;
3659 
3660 	/*
	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
	 * boundary, otherwise the hardware will overrun the buffer.  The
	 * simple fix is to make sure the buffer has enough room for the
	 * overrun.
3664 	 */
3665 	if (size & 7) {
3666 		size += 8 - (size & 7);
3667 	}
3668 
3669 	/* Adjust the alignment if requested */
3670 	if (alignment) {
		dma_attr_rbuf.dma_attr_align = alignment;
3672 	}
3673 
3674 	/*
3675 	 * Allocate DMA handle
3676 	 */
3677 	if (ddi_dma_alloc_handle(dip, &dma_attr_rbuf, DDI_DMA_DONTWAIT, NULL,
3678 	    dma_handle) != DDI_SUCCESS) {
3679 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3680 		    __func__);
3681 		return (QL_ERROR);
3682 	}
3683 	/*
3684 	 * Allocate DMA memory
3685 	 */
3686 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3687 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3688 	    DDI_DMA_DONTWAIT,
3689 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3690 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3691 		ddi_dma_free_handle(dma_handle);
3692 		return (QL_ERROR);
3693 	}
3694 
3695 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3696 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3697 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3698 		ddi_dma_mem_free(acc_handle);
3699 
3700 		ddi_dma_free_handle(dma_handle);
3701 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3702 		    __func__);
3703 		return (QL_ERROR);
3704 	}
3705 
3706 	if (cnt != 1) {
3707 
3708 		ql_free_phys(dma_handle, acc_handle);
3709 
3710 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3711 		    __func__);
3712 		return (QL_ERROR);
3713 	}
3714 
3715 	bzero((caddr_t)*vaddr, rlen);
3716 
3717 	return (0);
3718 }
3719 
3720 /*
 * Allocate and zero DMA-able physical memory.
3722  */
3723 static int
3724 ql_alloc_phys(dev_info_t *dip, ddi_dma_handle_t *dma_handle,
3725     ddi_device_acc_attr_t *device_acc_attr,
3726     uint_t dma_flags,
3727     ddi_acc_handle_t *acc_handle,
3728     size_t size,
3729     size_t alignment,
3730     caddr_t *vaddr,
3731     ddi_dma_cookie_t *dma_cookie)
3732 {
3733 	size_t rlen;
3734 	uint_t cnt;
3735 
3736 	/*
	 * Workaround: SUN XMITS buffers must start and end on an 8 byte
	 * boundary, otherwise the hardware will overrun the buffer.  The
	 * simple fix is to make sure the buffer has enough room for the
	 * overrun.
3740 	 */
3741 	if (size & 7) {
3742 		size += 8 - (size & 7);
3743 	}
3744 
3745 	/* Adjust the alignment if requested */
3746 	if (alignment) {
3747 		dma_attr.dma_attr_align = alignment;
3748 	}
3749 
3750 	/*
3751 	 * Allocate DMA handle
3752 	 */
3753 	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_DONTWAIT, NULL,
3754 	    dma_handle) != DDI_SUCCESS) {
3755 		cmn_err(CE_WARN, QL_BANG "%s:  ddi_dma_alloc_handle FAILED",
3756 		    __func__);
3757 		return (QL_ERROR);
3758 	}
3759 	/*
3760 	 * Allocate DMA memory
3761 	 */
3762 	if (ddi_dma_mem_alloc(*dma_handle, size, device_acc_attr,
3763 	    dma_flags & (DDI_DMA_CONSISTENT|DDI_DMA_STREAMING),
3764 	    DDI_DMA_DONTWAIT,
3765 	    NULL, vaddr, &rlen, acc_handle) != DDI_SUCCESS) {
3766 		cmn_err(CE_WARN, "alloc_phys: DMA Memory alloc Failed");
3767 		ddi_dma_free_handle(dma_handle);
3768 		return (QL_ERROR);
3769 	}
3770 
3771 	if (ddi_dma_addr_bind_handle(*dma_handle, NULL, *vaddr, rlen,
3772 	    dma_flags, DDI_DMA_DONTWAIT, NULL,
3773 	    dma_cookie, &cnt) != DDI_DMA_MAPPED) {
3774 		ddi_dma_mem_free(acc_handle);
3775 
3776 		ddi_dma_free_handle(dma_handle);
3777 		cmn_err(CE_WARN, "%s ddi_dma_addr_bind_handle FAILED",
3778 		    __func__);
3779 		return (QL_ERROR);
3780 	}
3781 
3782 	if (cnt != 1) {
3783 
3784 		ql_free_phys(dma_handle, acc_handle);
3785 
3786 		cmn_err(CE_WARN, "%s: cnt != 1; Failed segment count",
3787 		    __func__);
3788 		return (QL_ERROR);
3789 	}
3790 
3791 	bzero((caddr_t)*vaddr, rlen);
3792 
3793 	return (0);
3794 }
3795 
3796 /*
 * Add interrupt handlers based on the interrupt type.
 * The interrupt vectors and the rx/tx rings must have been allocated
 * before the handlers are added.
3800  */
3801 static int
3802 ql_add_intr_handlers(qlge_t *qlge)
3803 {
3804 	int vector = 0;
3805 	int rc, i;
	uint32_t value = 0;
3807 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3808 
3809 	switch (qlge->intr_type) {
3810 	case DDI_INTR_TYPE_MSIX:
3811 		/*
		 * Add an interrupt handler for each rx/tx ring, using
		 * vectors 0 through (qlge->intr_cnt - 1).
3814 		 */
3815 		value = 0;
3816 		for (vector = 0; vector < qlge->intr_cnt; vector++) {
3817 			ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3818 
3819 			/*
3820 			 * associate interrupt vector with interrupt handler
3821 			 */
3822 			rc = ddi_intr_add_handler(qlge->htable[vector],
3823 			    (ddi_intr_handler_t *)intr_ctx->handler,
3824 			    (void *)&qlge->rx_ring[vector], NULL);
3825 
3826 			QL_PRINT(DBG_INIT, ("rx_ring[%d] 0x%p\n",
3827 			    vector, &qlge->rx_ring[vector]));
3828 			if (rc != DDI_SUCCESS) {
3829 				QL_PRINT(DBG_INIT,
3830 				    ("Add rx interrupt handler failed. "
3831 				    "return: %d, vector: %d", rc, vector));
3832 				for (vector--; vector >= 0; vector--) {
3833 					(void) ddi_intr_remove_handler(
3834 					    qlge->htable[vector]);
3835 				}
3836 				return (DDI_FAILURE);
3837 			}
3838 			intr_ctx++;
3839 		}
3840 		break;
3841 
3842 	case DDI_INTR_TYPE_MSI:
3843 		/*
3844 		 * Add interrupt handlers for the only vector
3845 		 */
3846 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3847 
3848 		rc = ddi_intr_add_handler(qlge->htable[vector],
3849 		    ql_isr,
3850 		    (caddr_t)&qlge->rx_ring[0], NULL);
3851 
3852 		if (rc != DDI_SUCCESS) {
3853 			QL_PRINT(DBG_INIT,
3854 			    ("Add MSI interrupt handler failed: %d\n", rc));
3855 			return (DDI_FAILURE);
3856 		}
3857 		break;
3858 
3859 	case DDI_INTR_TYPE_FIXED:
3860 		/*
3861 		 * Add interrupt handlers for the only vector
3862 		 */
3863 		ql_atomic_set_32(&intr_ctx->irq_cnt, value);
3864 
3865 		rc = ddi_intr_add_handler(qlge->htable[vector],
3866 		    ql_isr,
3867 		    (caddr_t)&qlge->rx_ring[0], NULL);
3868 
3869 		if (rc != DDI_SUCCESS) {
3870 			QL_PRINT(DBG_INIT,
3871 			    ("Add legacy interrupt handler failed: %d\n", rc));
3872 			return (DDI_FAILURE);
3873 		}
3874 		break;
3875 
3876 	default:
3877 		return (DDI_FAILURE);
3878 	}
3879 
3880 	/* Enable interrupts */
3881 	/* Block enable */
3882 	if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
3883 		QL_PRINT(DBG_INIT, ("Block enabling %d interrupt(s)\n",
3884 		    qlge->intr_cnt));
3885 		(void) ddi_intr_block_enable(qlge->htable, qlge->intr_cnt);
3886 	} else { /* Non block enable */
3887 		for (i = 0; i < qlge->intr_cnt; i++) {
3888 			QL_PRINT(DBG_INIT, ("Non Block Enabling interrupt %d "
3889 			    "handle 0x%x\n", i, qlge->htable[i]));
3890 			(void) ddi_intr_enable(qlge->htable[i]);
3891 		}
3892 	}
3893 	qlge->sequence |= INIT_INTR_ENABLED;
3894 
3895 	return (DDI_SUCCESS);
3896 }
3897 
3898 /*
3899  * Here we build the intr_ctx structures based on
3900  * our rx_ring count and intr vector count.
3901  * The intr_ctx structure is used to hook each vector
3902  * to possibly different handlers.
3903  */
3904 static void
3905 ql_resolve_queues_to_irqs(qlge_t *qlge)
3906 {
3907 	int i = 0;
3908 	struct intr_ctx *intr_ctx = &qlge->intr_ctx[0];
3909 
3910 	if (qlge->intr_type == DDI_INTR_TYPE_MSIX) {
3911 		/*
		 * Each rx_ring has its own intr_ctx since we
		 * have a separate vector for each queue.
		 * This is only true when MSI-X is enabled.
3915 		 */
3916 		for (i = 0; i < qlge->intr_cnt; i++, intr_ctx++) {
3917 			qlge->rx_ring[i].irq = i;
3918 			intr_ctx->intr = i;
3919 			intr_ctx->qlge = qlge;
3920 
3921 			/*
			 * We set up each vector's enable/disable/read bits
			 * so there are no bit/mask calculations in the
			 * critical path.
3924 			 */
3925 			intr_ctx->intr_en_mask =
3926 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3927 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
3928 			    INTR_EN_IHD | i;
3929 			intr_ctx->intr_dis_mask =
3930 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3931 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3932 			    INTR_EN_IHD | i;
3933 			intr_ctx->intr_read_mask =
3934 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3935 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
3936 			    | i;
3937 
3938 			if (i == 0) {
3939 				/*
3940 				 * Default queue handles bcast/mcast plus
3941 				 * async events.
3942 				 */
3943 				intr_ctx->handler = ql_isr;
3944 			} else if (qlge->rx_ring[i].type == TX_Q) {
3945 				/*
3946 				 * Outbound queue is for outbound completions
3947 				 * only.
3948 				 */
3949 				if (qlge->isr_stride)
3950 					intr_ctx->handler = ql_msix_isr;
3951 				else
3952 					intr_ctx->handler = ql_msix_tx_isr;
3953 			} else {
3954 				/*
3955 				 * Inbound queues handle unicast frames only.
3956 				 */
3957 				if (qlge->isr_stride)
3958 					intr_ctx->handler = ql_msix_isr;
3959 				else
3960 					intr_ctx->handler = ql_msix_rx_isr;
3961 			}
3962 		}
3963 		i = qlge->intr_cnt;
3964 		for (; i < qlge->rx_ring_count; i++, intr_ctx++) {
3965 			int iv = i - qlge->isr_stride;
3966 			qlge->rx_ring[i].irq = iv;
3967 			intr_ctx->intr = iv;
3968 			intr_ctx->qlge = qlge;
3969 
3970 			/*
			 * We set up each vector's enable/disable/read bits
			 * so there are no bit/mask calculations in the
			 * critical path.
3973 			 */
3974 			intr_ctx->intr_en_mask =
3975 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3976 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK |
3977 			    INTR_EN_IHD | iv;
3978 			intr_ctx->intr_dis_mask =
3979 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3980 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3981 			    INTR_EN_IHD | iv;
3982 			intr_ctx->intr_read_mask =
3983 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3984 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD
3985 			    | iv;
3986 
3987 			if (qlge->rx_ring[i].type == TX_Q) {
3988 				/*
3989 				 * Outbound queue is for outbound completions
3990 				 * only.
3991 				 */
3992 				intr_ctx->handler = ql_msix_isr;
3993 			} else {
3994 				/*
3995 				 * Inbound queues handle unicast frames only.
3996 				 */
3997 				intr_ctx->handler = ql_msix_rx_isr;
3998 			}
3999 		}
4000 	} else {
4001 		/*
4002 		 * All rx_rings use the same intr_ctx since
4003 		 * there is only one vector.
4004 		 */
4005 		intr_ctx->intr = 0;
4006 		intr_ctx->qlge = qlge;
4007 		/*
		 * We set up each vector's enable/disable/read bits so
		 * there are no bit/mask calculations in the critical path.
4010 		 */
4011 		intr_ctx->intr_en_mask =
4012 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4013 		    INTR_EN_TYPE_ENABLE;
4014 		intr_ctx->intr_dis_mask =
4015 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4016 		    INTR_EN_TYPE_DISABLE;
4017 		intr_ctx->intr_read_mask =
4018 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
4019 		    INTR_EN_TYPE_READ;
4020 		/*
4021 		 * Single interrupt means one handler for all rings.
4022 		 */
4023 		intr_ctx->handler = ql_isr;
4024 		for (i = 0; i < qlge->rx_ring_count; i++)
4025 			qlge->rx_ring[i].irq = 0;
4026 	}
4027 }
4028 
4029 
4030 /*
4031  * Free allocated interrupts.
4032  */
4033 static void
4034 ql_free_irq_vectors(qlge_t *qlge)
4035 {
4036 	int i;
4037 	int rc;
4038 
4039 	if (qlge->sequence & INIT_INTR_ENABLED) {
4040 		/* Disable all interrupts */
4041 		if (qlge->intr_cap & DDI_INTR_FLAG_BLOCK) {
4042 			/* Call ddi_intr_block_disable() */
4043 			(void) ddi_intr_block_disable(qlge->htable,
4044 			    qlge->intr_cnt);
4045 		} else {
4046 			for (i = 0; i < qlge->intr_cnt; i++) {
4047 				(void) ddi_intr_disable(qlge->htable[i]);
4048 			}
4049 		}
4050 
4051 		qlge->sequence &= ~INIT_INTR_ENABLED;
4052 	}
4053 
4054 	for (i = 0; i < qlge->intr_cnt; i++) {
4055 
4056 		if (qlge->sequence & INIT_ADD_INTERRUPT)
4057 			(void) ddi_intr_remove_handler(qlge->htable[i]);
4058 
4059 		if (qlge->sequence & INIT_INTR_ALLOC) {
4060 			rc = ddi_intr_free(qlge->htable[i]);
4061 			if (rc != DDI_SUCCESS) {
4062 				/* EMPTY */
4063 				QL_PRINT(DBG_INIT, ("Free intr failed: %d",
4064 				    rc));
4065 			}
4066 		}
4067 	}
4068 	if (qlge->sequence & INIT_INTR_ALLOC)
4069 		qlge->sequence &= ~INIT_INTR_ALLOC;
4070 
4071 	if (qlge->sequence & INIT_ADD_INTERRUPT)
4072 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4073 
4074 	if (qlge->htable) {
4075 		kmem_free(qlge->htable, qlge->intr_size);
4076 		qlge->htable = NULL;
4077 	}
4078 }
4079 
4080 /*
4081  * Allocate interrupt vectors
4082  * For legacy and MSI, only 1 handle is needed.
4083  * For MSI-X, if fewer than 2 vectors are available, return failure.
4084  * Upon success, this maps the vectors to rx and tx rings for
4085  * interrupts.
4086  */
4087 static int
4088 ql_request_irq_vectors(qlge_t *qlge, int intr_type)
4089 {
4090 	dev_info_t *devinfo;
4091 	uint32_t request, orig;
4092 	int count, avail, actual;
4093 	int minimum;
4094 	int rc;
4095 
4096 	devinfo = qlge->dip;
4097 
4098 	switch (intr_type) {
4099 	case DDI_INTR_TYPE_FIXED:
4100 		request = 1;	/* Request 1 legacy interrupt handle */
4101 		minimum = 1;
4102 		QL_PRINT(DBG_INIT, ("interrupt type: legacy\n"));
4103 		break;
4104 
4105 	case DDI_INTR_TYPE_MSI:
4106 		request = 1;	/* Request 1 MSI interrupt handle */
4107 		minimum = 1;
4108 		QL_PRINT(DBG_INIT, ("interrupt type: MSI\n"));
4109 		break;
4110 
4111 	case DDI_INTR_TYPE_MSIX:
4112 		/*
4113 		 * Ideal number of vectors for the adapter is
4114 		 * # rss rings + tx completion rings for default completion
4115 		 * queue.
4116 		 */
4117 		request = qlge->rx_ring_count;
4118 
4119 		orig = request;
4120 		if (request > (MAX_RX_RINGS))
4121 			request = MAX_RX_RINGS;
4122 		minimum = 2;
4123 		QL_PRINT(DBG_INIT, ("interrupt type: MSI-X\n"));
4124 		break;
4125 
4126 	default:
4127 		QL_PRINT(DBG_INIT, ("Invalid parameter\n"));
4128 		return (DDI_FAILURE);
4129 	}
4130 
4131 	QL_PRINT(DBG_INIT, ("interrupt handles requested: %d  minimum: %d\n",
4132 	    request, minimum));
4133 
4134 	/*
4135 	 * Get number of supported interrupts
4136 	 */
4137 	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4138 	if ((rc != DDI_SUCCESS) || (count < minimum)) {
4139 		QL_PRINT(DBG_INIT, ("Get interrupt number failed. Return: %d, "
4140 		    "count: %d\n", rc, count));
4141 		return (DDI_FAILURE);
4142 	}
4143 	QL_PRINT(DBG_INIT, ("interrupts supported: %d\n", count));
4144 
4145 	/*
4146 	 * Get number of available interrupts
4147 	 */
4148 	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
4149 	if ((rc != DDI_SUCCESS) || (avail < minimum)) {
4150 		QL_PRINT(DBG_INIT,
4151 		    ("Get interrupt available number failed. Return:"
4152 		    " %d, available: %d\n", rc, avail));
4153 		return (DDI_FAILURE);
4154 	}
4155 	QL_PRINT(DBG_INIT, ("interrupts available: %d\n", avail));
4156 
4157 	if (avail < request) {
4158 		QL_PRINT(DBG_INIT, ("Request %d handles, %d available\n",
4159 		    request, avail));
4160 		request = avail;
4161 	}
4162 
4163 	actual = 0;
4164 	qlge->intr_cnt = 0;
4165 
4166 	/*
4167 	 * Allocate an array of interrupt handles
4168 	 */
4169 	qlge->intr_size = (size_t)(request * sizeof (ddi_intr_handle_t));
4170 	qlge->htable = kmem_alloc(qlge->intr_size, KM_SLEEP);
4171 
4172 	rc = ddi_intr_alloc(devinfo, qlge->htable, intr_type, 0,
4173 	    (int)request, &actual, DDI_INTR_ALLOC_NORMAL);
4174 	if (rc != DDI_SUCCESS) {
4175 		cmn_err(CE_WARN, "%s(%d) Allocate interrupts failed. return:"
4176 		    " %d, request: %d, actual: %d",
4177 		    __func__, qlge->instance, rc, request, actual);
4178 		goto ql_intr_alloc_fail;
4179 	}
4180 	qlge->intr_cnt = actual;
4181 
4182 	qlge->sequence |= INIT_INTR_ALLOC;
4183 
4184 	/*
	 * If the actual number of vectors is less than the minimum
	 * then fail.
4187 	 */
4188 	if (actual < minimum) {
4189 		cmn_err(CE_WARN,
4190 		    "Insufficient interrupt handles available: %d", actual);
4191 		goto ql_intr_alloc_fail;
4192 	}
4193 
4194 	/*
	 * For MSI-X, the actual vector count might force us to reduce the
	 * number of tx & rx rings
4196 	 */
4197 	if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) {
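		/*
		 * Scale back to the largest of orig/2, orig/4 or orig/8
		 * that fits within the vectors actually granted, keeping
		 * the tx and rss ring counts equal; otherwise fall back to
		 * a single tx ring and give the remaining vectors to the
		 * rss rings.
		 */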
4198 		if (actual >= (orig / 2)) {
4199 			count = orig / 2;
4200 			qlge->rss_ring_count = count;
4201 			qlge->tx_ring_count = count;
4202 			qlge->isr_stride = count;
4203 		} else if (actual >= (orig / 4)) {
4204 			count = orig / 4;
4205 			qlge->rss_ring_count = count;
4206 			qlge->tx_ring_count = count;
4207 			qlge->isr_stride = count;
4208 		} else if (actual >= (orig / 8)) {
4209 			count = orig / 8;
4210 			qlge->rss_ring_count = count;
4211 			qlge->tx_ring_count = count;
4212 			qlge->isr_stride = count;
4213 		} else if (actual < MAX_RX_RINGS) {
4214 			qlge->tx_ring_count = 1;
4215 			qlge->rss_ring_count = actual - 1;
4216 		}
4217 		qlge->intr_cnt = count;
4218 		qlge->rx_ring_count = qlge->tx_ring_count +
4219 		    qlge->rss_ring_count;
4220 	}
4221 	cmn_err(CE_NOTE, "!qlge(%d) tx %d, rss %d, stride %d\n", qlge->instance,
4222 	    qlge->tx_ring_count, qlge->rss_ring_count, qlge->isr_stride);
4223 
4224 	/*
4225 	 * Get priority for first vector, assume remaining are all the same
4226 	 */
4227 	rc = ddi_intr_get_pri(qlge->htable[0], &qlge->intr_pri);
4228 	if (rc != DDI_SUCCESS) {
4229 		QL_PRINT(DBG_INIT, ("Get interrupt priority failed: %d\n", rc));
4230 		goto ql_intr_alloc_fail;
4231 	}
4232 
4233 	rc = ddi_intr_get_cap(qlge->htable[0], &qlge->intr_cap);
4234 	if (rc != DDI_SUCCESS) {
4235 		QL_PRINT(DBG_INIT, ("Get interrupt cap failed: %d\n", rc));
4236 		goto ql_intr_alloc_fail;
4237 	}
4238 
4239 	qlge->intr_type = intr_type;
4240 
4241 	return (DDI_SUCCESS);
4242 
4243 ql_intr_alloc_fail:
4244 	ql_free_irq_vectors(qlge);
4245 
4246 	return (DDI_FAILURE);
4247 }
4248 
4249 /*
 * Allocate interrupt vector(s) for one of the following interrupt types:
 * MSI-X, MSI or legacy (fixed).  In MSI and legacy modes we support only
 * a single receive and a single transmit queue.
4253  */
4254 int
4255 ql_alloc_irqs(qlge_t *qlge)
4256 {
4257 	int intr_types;
4258 	int rval;
4259 
4260 	/*
4261 	 * Get supported interrupt types
4262 	 */
4263 	if (ddi_intr_get_supported_types(qlge->dip, &intr_types)
4264 	    != DDI_SUCCESS) {
4265 		cmn_err(CE_WARN, "%s(%d):ddi_intr_get_supported_types failed",
4266 		    __func__, qlge->instance);
4267 
4268 		return (DDI_FAILURE);
4269 	}
4270 
4271 	QL_PRINT(DBG_INIT, ("%s(%d) Interrupt types supported %d\n",
4272 	    __func__, qlge->instance, intr_types));
4273 
4274 	/* Install MSI-X interrupts */
4275 	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0) {
4276 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt supported %d\n",
4277 		    __func__, qlge->instance, intr_types));
4278 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSIX);
4279 		if (rval == DDI_SUCCESS) {
4280 			return (rval);
4281 		}
4282 		QL_PRINT(DBG_INIT, ("%s(%d) MSI-X interrupt allocation failed,"
4283 		    " trying MSI interrupts ...\n", __func__, qlge->instance));
4284 	}
4285 
4286 	/*
4287 	 * We will have 2 completion queues in MSI / Legacy mode,
4288 	 * Queue 0 for default completions
4289 	 * Queue 1 for transmit completions
4290 	 */
4291 	qlge->rss_ring_count = 1; /* Default completion queue (0) for all */
4292 	qlge->tx_ring_count = 1; /* Single tx completion queue */
4293 	qlge->rx_ring_count = qlge->tx_ring_count + qlge->rss_ring_count;
4294 
4295 	QL_PRINT(DBG_INIT, ("%s(%d) Falling back to single completion queue \n",
4296 	    __func__, qlge->instance));
4297 	/*
4298 	 * Add the h/w interrupt handler and initialise mutexes
4299 	 */
4300 	rval = DDI_FAILURE;
4301 
4302 	/*
	 * If the OS supports MSI-X but the allocation failed, try MSI
	 * interrupts.  If MSI allocation also fails, fall back to fixed
	 * (legacy) interrupts.
4306 	 */
4307 	if (intr_types & DDI_INTR_TYPE_MSI) {
4308 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_MSI);
4309 		if (rval == DDI_SUCCESS) {
4310 			qlge->intr_type = DDI_INTR_TYPE_MSI;
4311 			QL_PRINT(DBG_INIT, ("%s(%d) use MSI Interrupt \n",
4312 			    __func__, qlge->instance));
4313 		}
4314 	}
4315 
4316 	/* Try Fixed interrupt Legacy mode */
4317 	if (rval != DDI_SUCCESS) {
4318 		rval = ql_request_irq_vectors(qlge, DDI_INTR_TYPE_FIXED);
4319 		if (rval != DDI_SUCCESS) {
4320 			cmn_err(CE_WARN, "%s(%d):Legacy mode interrupt "
4321 			    "allocation failed",
4322 			    __func__, qlge->instance);
4323 		} else {
4324 			qlge->intr_type = DDI_INTR_TYPE_FIXED;
4325 			QL_PRINT(DBG_INIT, ("%s(%d) use Fixed Interrupt \n",
4326 			    __func__, qlge->instance));
4327 		}
4328 	}
4329 
4330 	return (rval);
4331 }
4332 
4333 static void
4334 ql_free_rx_tx_locks(qlge_t *qlge)
4335 {
4336 	int i;
4337 	struct rx_ring *rx_ring;
4338 	struct tx_ring *tx_ring;
4339 
4340 	for (i = 0; i < qlge->tx_ring_count; i++) {
4341 		tx_ring = &qlge->tx_ring[i];
4342 		mutex_destroy(&tx_ring->tx_lock);
4343 	}
4344 
4345 	for (i = 0; i < qlge->rx_ring_count; i++) {
4346 		rx_ring = &qlge->rx_ring[i];
4347 		mutex_destroy(&rx_ring->rx_lock);
4348 		mutex_destroy(&rx_ring->sbq_lock);
4349 		mutex_destroy(&rx_ring->lbq_lock);
4350 	}
4351 }
4352 
4353 /*
 * Free all resources allocated during attach.
 *
 * Input:
 * qlge = adapter state pointer; qlge->sequence bits indicate which
 *        resources were allocated and must be freed.
4359  *
4360  * Context:
4361  * Kernel context.
4362  */
4363 static void
4364 ql_free_resources(qlge_t *qlge)
4365 {
4366 
4367 	/* Disable driver timer */
4368 	ql_stop_timer(qlge);
4369 
4370 	if (qlge->sequence & INIT_MAC_REGISTERED) {
4371 		(void) mac_unregister(qlge->mh);
4372 		qlge->sequence &= ~INIT_MAC_REGISTERED;
4373 	}
4374 
4375 	if (qlge->sequence & INIT_MAC_ALLOC) {
4376 		/* Nothing to do, macp is already freed */
4377 		qlge->sequence &= ~INIT_MAC_ALLOC;
4378 	}
4379 
4380 	if (qlge->sequence & INIT_PCI_CONFIG_SETUP) {
4381 		pci_config_teardown(&qlge->pci_handle);
4382 		qlge->sequence &= ~INIT_PCI_CONFIG_SETUP;
4383 	}
4384 
4385 	if (qlge->sequence & INIT_ADD_INTERRUPT) {
4386 		ql_free_irq_vectors(qlge);
4387 		qlge->sequence &= ~INIT_ADD_INTERRUPT;
4388 	}
4389 
4390 	if (qlge->sequence & INIT_ADD_SOFT_INTERRUPT) {
4391 		(void) ddi_intr_remove_softint(qlge->mpi_event_intr_hdl);
4392 		(void) ddi_intr_remove_softint(qlge->mpi_reset_intr_hdl);
4393 		(void) ddi_intr_remove_softint(qlge->asic_reset_intr_hdl);
4394 		qlge->sequence &= ~INIT_ADD_SOFT_INTERRUPT;
4395 	}
4396 
4397 	if (qlge->sequence & INIT_KSTATS) {
4398 		ql_fini_kstats(qlge);
4399 		qlge->sequence &= ~INIT_KSTATS;
4400 	}
4401 
4402 	if (qlge->sequence & INIT_MUTEX) {
4403 		mutex_destroy(&qlge->gen_mutex);
4404 		mutex_destroy(&qlge->hw_mutex);
4405 		mutex_destroy(&qlge->mbx_mutex);
4406 		cv_destroy(&qlge->cv_mbx_intr);
4407 		qlge->sequence &= ~INIT_MUTEX;
4408 	}
4409 
4410 	if (qlge->sequence & INIT_LOCKS_CREATED) {
4411 		ql_free_rx_tx_locks(qlge);
4412 		qlge->sequence &= ~INIT_LOCKS_CREATED;
4413 	}
4414 
4415 	if (qlge->sequence & INIT_MEMORY_ALLOC) {
4416 		ql_free_mem_resources(qlge);
4417 		qlge->sequence &= ~INIT_MEMORY_ALLOC;
4418 	}
4419 
4420 	if (qlge->sequence & INIT_REGS_SETUP) {
4421 		ddi_regs_map_free(&qlge->dev_handle);
4422 		qlge->sequence &= ~INIT_REGS_SETUP;
4423 	}
4424 
4425 	if (qlge->sequence & INIT_DOORBELL_REGS_SETUP) {
4426 		ddi_regs_map_free(&qlge->dev_doorbell_reg_handle);
4427 		qlge->sequence &= ~INIT_DOORBELL_REGS_SETUP;
4428 	}
4429 
4430 	/*
	 * Free the flash FLT table that was allocated during attach.
	 */
	if ((qlge->flt.ql_flt_entry_ptr != NULL) &&
4434 	    (qlge->flt.header.length != 0)) {
4435 		kmem_free(qlge->flt.ql_flt_entry_ptr, qlge->flt.header.length);
4436 		qlge->flt.ql_flt_entry_ptr = NULL;
4437 	}
4438 
4439 	if (qlge->sequence & INIT_FM) {
4440 		ql_fm_fini(qlge);
4441 		qlge->sequence &= ~INIT_FM;
4442 	}
4443 
4444 	ddi_prop_remove_all(qlge->dip);
4445 	ddi_set_driver_private(qlge->dip, NULL);
4446 
4447 	/* finally, free qlge structure */
4448 	if (qlge->sequence & INIT_SOFTSTATE_ALLOC) {
4449 		kmem_free(qlge, sizeof (qlge_t));
4450 	}
4451 }
4452 
4453 /*
 * Set the promiscuous mode of the driver.
 * The caller must hold the HW_LOCK.
4456  */
4457 void
4458 ql_set_promiscuous(qlge_t *qlge, int mode)
4459 {
4460 	if (mode) {
4461 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4462 		    RT_IDX_VALID, 1);
4463 	} else {
4464 		(void) ql_set_routing_reg(qlge, RT_IDX_PROMISCUOUS_SLOT,
4465 		    RT_IDX_VALID, 0);
4466 	}
4467 }
4468 /*
4469  * Write 'data1' to Mac Protocol Address Index Register and
4470  * 'data2' to Mac Protocol Address Data Register
4471  *  Assuming that the Mac Protocol semaphore lock has been acquired.
4472  */
4473 static int
4474 ql_write_mac_proto_regs(qlge_t *qlge, uint32_t data1, uint32_t data2)
4475 {
4476 	int return_value = DDI_SUCCESS;
4477 
4478 	if (ql_wait_reg_bit(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
4479 	    MAC_PROTOCOL_ADDRESS_INDEX_MW, BIT_SET, 5) != DDI_SUCCESS) {
4480 		cmn_err(CE_WARN, "Wait for MAC_PROTOCOL Address Register "
4481 		    "timeout.");
4482 		return_value = DDI_FAILURE;
4483 		goto out;
4484 	}
4485 	ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX /* A8 */, data1);
4486 	ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA /* 0xAC */, data2);
4487 out:
4488 	return (return_value);
4489 }
4490 /*
4491  * Enable the 'index'ed multicast address in the host memory's multicast_list
4492  */
4493 int
4494 ql_add_multicast_address(qlge_t *qlge, int index)
4495 {
4496 	int rtn_val = DDI_FAILURE;
4497 	uint32_t offset;
4498 	uint32_t value1, value2;
4499 
4500 	/* Acquire the required semaphore */
4501 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4502 		return (rtn_val);
4503 	}
4504 
4505 	/* Program Offset0 - lower 32 bits of the MAC address */
4506 	offset = 0;
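	/*
	 * value1 selects the entry to program: the multicast index shifted
	 * left by 4, the word select (offset) in the low bits, plus the
	 * type and enable flags.
	 */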
4507 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4508 	    (index << 4) | offset;
4509 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4510 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4511 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4512 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4513 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS)
4514 		goto out;
4515 
4516 	/* Program offset1: upper 16 bits of the MAC address */
4517 	offset = 1;
4518 	value1 = MAC_PROTOCOL_ADDRESS_ENABLE | MAC_PROTOCOL_TYPE_MULTICAST |
4519 	    (index<<4) | offset;
4520 	value2 = ((qlge->multicast_list[index].addr.ether_addr_octet[0] << 8)
4521 	    |qlge->multicast_list[index].addr.ether_addr_octet[1]);
4522 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4523 		goto out;
4524 	}
4525 	rtn_val = DDI_SUCCESS;
4526 out:
4527 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4528 	return (rtn_val);
4529 }
4530 
4531 /*
4532  * Disable the 'index'ed multicast address in the host memory's multicast_list
4533  */
4534 int
4535 ql_remove_multicast_address(qlge_t *qlge, int index)
4536 {
4537 	int rtn_val = DDI_FAILURE;
4538 	uint32_t offset;
4539 	uint32_t value1, value2;
4540 
4541 	/* Acquire the required semaphore */
4542 	if (ql_sem_spinlock(qlge, QL_MAC_PROTOCOL_SEM_MASK) != DDI_SUCCESS) {
4543 		return (rtn_val);
4544 	}
4545 	/* Program Offset0 - lower 32 bits of the MAC address */
4546 	offset = 0;
4547 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4548 	value2 =
4549 	    ((qlge->multicast_list[index].addr.ether_addr_octet[2] << 24)
4550 	    |(qlge->multicast_list[index].addr.ether_addr_octet[3] << 16)
4551 	    |(qlge->multicast_list[index].addr.ether_addr_octet[4] << 8)
4552 	    |(qlge->multicast_list[index].addr.ether_addr_octet[5]));
4553 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4554 		goto out;
4555 	}
4556 	/* Program offset1: upper 16 bits of the MAC address */
4557 	offset = 1;
4558 	value1 = (MAC_PROTOCOL_TYPE_MULTICAST | offset)|(index<<4);
4559 	value2 = 0;
4560 	if (ql_write_mac_proto_regs(qlge, value1, value2) != DDI_SUCCESS) {
4561 		goto out;
4562 	}
4563 	rtn_val = DDI_SUCCESS;
4564 out:
4565 	ql_sem_unlock(qlge, QL_MAC_PROTOCOL_SEM_MASK);
4566 	return (rtn_val);
4567 }
4568 
4569 /*
 * Add a new multicast address to the list of supported multicast
 * addresses.  This API is called after the OS calls gld_set_multicast
 * (GLDv2) or m_multicst (GLDv3).
 *
 * Restriction:
 * The maximum number of multicast addresses is limited by the hardware.
4576  */
4577 int
4578 ql_add_to_multicast_list(qlge_t *qlge, uint8_t *ep)
4579 {
4580 	uint32_t index = qlge->multicast_list_count;
4581 	int rval = DDI_SUCCESS;
4582 	int status;
4583 
4584 	if ((ep[0] & 01) == 0) {
4585 		rval = EINVAL;
4586 		goto exit;
4587 	}
4588 
	/* if there is available space in multicast_list, then add it */
4590 	if (index < MAX_MULTICAST_LIST_SIZE) {
4591 		bcopy(ep, qlge->multicast_list[index].addr.ether_addr_octet,
4592 		    ETHERADDRL);
4593 		/* increment the total number of addresses in multicast list */
4594 		(void) ql_add_multicast_address(qlge, index);
4595 		qlge->multicast_list_count++;
4596 		QL_PRINT(DBG_GLD,
4597 		    ("%s(%d): added to index of multicast list= 0x%x, "
4598 		    "total %d\n", __func__, qlge->instance, index,
4599 		    qlge->multicast_list_count));
4600 
4601 		if (index > MAX_MULTICAST_HW_SIZE) {
4602 			if (!qlge->multicast_promisc) {
4603 				status = ql_set_routing_reg(qlge,
4604 				    RT_IDX_ALLMULTI_SLOT,
4605 				    RT_IDX_MCAST, 1);
4606 				if (status) {
4607 					cmn_err(CE_WARN,
4608 					    "Failed to init routing reg "
4609 					    "for mcast promisc mode.");
4610 					rval = ENOENT;
4611 					goto exit;
4612 				}
4613 				qlge->multicast_promisc = B_TRUE;
4614 			}
4615 		}
4616 	} else {
4617 		rval = ENOENT;
4618 	}
4619 exit:
4620 	return (rval);
4621 }
4622 
4623 /*
 * Remove an old multicast address from the list of supported multicast
 * addresses.  This API is called after the OS calls gld_set_multicast
 * (GLDv2) or m_multicst (GLDv3).
 * The maximum number of multicast addresses is limited by the hardware.
4628  */
4629 int
4630 ql_remove_from_multicast_list(qlge_t *qlge, uint8_t *ep)
4631 {
4632 	uint32_t total = qlge->multicast_list_count;
4633 	int i = 0;
4634 	int rmv_index = 0;
4635 	size_t length = sizeof (ql_multicast_addr);
4636 	int status;
4637 
4638 	for (i = 0; i < total; i++) {
4639 		if (bcmp(ep, &qlge->multicast_list[i].addr, ETHERADDRL) != 0) {
4640 			continue;
4641 		}
4642 
4643 		rmv_index = i;
		/* block-move the rest of the multicast addresses forward */
		length = ((total - 1) - i) * sizeof (ql_multicast_addr);
4646 		if (length > 0) {
4647 			bcopy(&qlge->multicast_list[i+1],
4648 			    &qlge->multicast_list[i], length);
4649 		}
4650 		qlge->multicast_list_count--;
4651 		if (qlge->multicast_list_count <= MAX_MULTICAST_HW_SIZE) {
4652 			/*
4653 			 * there is a deletion in multicast list table,
4654 			 * re-enable them
4655 			 */
4656 			for (i = rmv_index; i < qlge->multicast_list_count;
4657 			    i++) {
4658 				(void) ql_add_multicast_address(qlge, i);
4659 			}
4660 			/* and disable the last one */
4661 			(void) ql_remove_multicast_address(qlge, i);
4662 
4663 			/* disable multicast promiscuous mode */
4664 			if (qlge->multicast_promisc) {
4665 				status = ql_set_routing_reg(qlge,
4666 				    RT_IDX_ALLMULTI_SLOT,
4667 				    RT_IDX_MCAST, 0);
4668 				if (status) {
4669 					cmn_err(CE_WARN,
4670 					    "Failed to init routing reg for "
4671 					    "mcast promisc mode.");
4672 					goto exit;
4673 				}
4674 				/* write to config register */
4675 				qlge->multicast_promisc = B_FALSE;
4676 			}
4677 		}
4678 		break;
4679 	}
4680 exit:
4681 	return (DDI_SUCCESS);
4682 }
4683 
4684 /*
 * Read an XGMAC register
4686  */
4687 int
4688 ql_read_xgmac_reg(qlge_t *qlge, uint32_t addr, uint32_t *val)
4689 {
4690 	int rtn_val = DDI_FAILURE;
4691 
4692 	/* wait for XGMAC Address register RDY bit set */
4693 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4694 	    BIT_SET, 10) != DDI_SUCCESS) {
4695 		goto out;
4696 	}
4697 	/* start rx transaction */
4698 	ql_write_reg(qlge, REG_XGMAC_ADDRESS, addr|XGMAC_ADDRESS_READ_TRANSACT);
4699 
4700 	/*
4701 	 * wait for XGMAC Address register RDY bit set,
4702 	 * which indicates data is ready
4703 	 */
4704 	if (ql_wait_reg_bit(qlge, REG_XGMAC_ADDRESS, XGMAC_ADDRESS_RDY,
4705 	    BIT_SET, 10) != DDI_SUCCESS) {
4706 		goto out;
4707 	}
	/* read data from the XGMAC_DATA register */
4709 	*val = ql_read_reg(qlge, REG_XGMAC_DATA);
4710 	rtn_val = DDI_SUCCESS;
4711 out:
4712 	return (rtn_val);
4713 }
4714 
4715 /*
4716  * Implement checksum offload for IPv4 IP packets
4717  */
4718 static void
4719 ql_hw_csum_setup(qlge_t *qlge, uint32_t pflags, caddr_t bp,
4720     struct ob_mac_iocb_req *mac_iocb_ptr)
4721 {
4722 	struct ip *iphdr = NULL;
4723 	struct ether_header *ethhdr;
4724 	struct ether_vlan_header *ethvhdr;
4725 	struct tcphdr *tcp_hdr;
4726 	uint32_t etherType;
4727 	int mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
4728 	int ip_hdr_off, tcp_udp_hdr_off, hdr_off;
4729 
4730 	ethhdr  = (struct ether_header *)((void *)bp);
4731 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4732 	/* Is this vlan packet? */
4733 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
4734 		mac_hdr_len = sizeof (struct ether_vlan_header);
4735 		etherType = ntohs(ethvhdr->ether_type);
4736 	} else {
4737 		mac_hdr_len = sizeof (struct ether_header);
4738 		etherType = ntohs(ethhdr->ether_type);
4739 	}
4740 	/* Is this IPv4 or IPv6 packet? */
4741 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len)) ==
4742 	    IPV4_VERSION) {
4743 		if (etherType == ETHERTYPE_IP /* 0800 */) {
4744 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
4745 		} else {
4746 			/* EMPTY */
4747 			QL_PRINT(DBG_TX,
4748 			    ("%s(%d) : IPv4 None IP packet type 0x%x\n",
4749 			    __func__, qlge->instance, etherType));
4750 		}
4751 	}
	/* IPv4 packets */
4753 	if (iphdr != NULL) {
4754 
4755 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
4756 		QL_PRINT(DBG_TX,
4757 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH:"
4758 		    " %d bytes \n", __func__, qlge->instance, ip_hdr_len));
4759 
4760 		ip_hdr_off = mac_hdr_len;
4761 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
4762 		    __func__, qlge->instance, ip_hdr_len));
4763 
4764 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
4765 		    OB_MAC_IOCB_REQ_IPv4);
4766 
4767 		if (pflags & HCK_IPV4_HDRCKSUM) {
4768 			QL_PRINT(DBG_TX, ("%s(%d) : Do IPv4 header checksum\n",
4769 			    __func__, qlge->instance));
4770 			mac_iocb_ptr->opcode = OPCODE_OB_MAC_OFFLOAD_IOCB;
4771 			mac_iocb_ptr->flag2 = (uint8_t)(mac_iocb_ptr->flag2 |
4772 			    OB_MAC_IOCB_REQ_IC);
4773 			iphdr->ip_sum = 0;
4774 			mac_iocb_ptr->hdr_off = (uint16_t)
4775 			    cpu_to_le16(ip_hdr_off);
4776 		}
4777 		if (pflags & HCK_FULLCKSUM) {
4778 			if (iphdr->ip_p == IPPROTO_TCP) {
4779 				tcp_hdr =
4780 				    (struct tcphdr *)(void *)
4781 				    ((uint8_t *)(void *)iphdr + ip_hdr_len);
4782 				QL_PRINT(DBG_TX, ("%s(%d) : Do TCP checksum\n",
4783 				    __func__, qlge->instance));
4784 				mac_iocb_ptr->opcode =
4785 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4786 				mac_iocb_ptr->flag1 =
4787 				    (uint8_t)(mac_iocb_ptr->flag1 |
4788 				    OB_MAC_IOCB_REQ_TC);
4789 				mac_iocb_ptr->flag2 =
4790 				    (uint8_t)(mac_iocb_ptr->flag2 |
4791 				    OB_MAC_IOCB_REQ_IC);
4792 				iphdr->ip_sum = 0;
4793 				tcp_udp_hdr_off = mac_hdr_len+ip_hdr_len;
4794 				tcp_udp_hdr_len = tcp_hdr->th_off*4;
4795 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
4796 				    __func__, qlge->instance, tcp_udp_hdr_len));
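				/*
				 * Pack the IP header offset into the low 6
				 * bits of hdr_off and the TCP header offset
				 * into the remaining upper bits.
				 */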
4797 				hdr_off = ip_hdr_off;
4798 				tcp_udp_hdr_off <<= 6;
4799 				hdr_off |= tcp_udp_hdr_off;
4800 				mac_iocb_ptr->hdr_off =
4801 				    (uint16_t)cpu_to_le16(hdr_off);
4802 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4803 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
4804 				    tcp_udp_hdr_len);
4805 
4806 				/*
				 * if the chip is unable to do the pseudo
				 * header cksum calculation, do it here, then
				 * put the result into the data passed to the
				 * chip
4810 				 */
4811 				if (qlge->cfg_flags &
4812 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4813 					ql_pseudo_cksum((uint8_t *)iphdr);
4814 				}
4815 			} else if (iphdr->ip_p == IPPROTO_UDP) {
4816 				QL_PRINT(DBG_TX, ("%s(%d) : Do UDP checksum\n",
4817 				    __func__, qlge->instance));
4818 				mac_iocb_ptr->opcode =
4819 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
4820 				mac_iocb_ptr->flag1 =
4821 				    (uint8_t)(mac_iocb_ptr->flag1 |
4822 				    OB_MAC_IOCB_REQ_UC);
4823 				mac_iocb_ptr->flag2 =
4824 				    (uint8_t)(mac_iocb_ptr->flag2 |
4825 				    OB_MAC_IOCB_REQ_IC);
4826 				iphdr->ip_sum = 0;
4827 				tcp_udp_hdr_off = mac_hdr_len + ip_hdr_len;
4828 				tcp_udp_hdr_len = sizeof (struct udphdr);
4829 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
4830 				    __func__, qlge->instance, tcp_udp_hdr_len));
4831 				hdr_off = ip_hdr_off;
4832 				tcp_udp_hdr_off <<= 6;
4833 				hdr_off |= tcp_udp_hdr_off;
4834 				mac_iocb_ptr->hdr_off =
4835 				    (uint16_t)cpu_to_le16(hdr_off);
4836 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
4837 				    cpu_to_le16(mac_hdr_len + ip_hdr_len
4838 				    + tcp_udp_hdr_len);
4839 
4840 				/*
				 * if the chip is unable to calculate the
				 * pseudo hdr cksum, do it here, then put the
				 * result into the data passed to the chip
4844 				 */
4845 				if (qlge->cfg_flags &
4846 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM) {
4847 					ql_pseudo_cksum((uint8_t *)iphdr);
4848 				}
4849 			}
4850 		}
4851 	}
4852 }
4853 
4854 /*
4855  * For TSO/LSO:
4856  * MAC frame transmission with TCP large segment offload is performed in the
4857  * same way as the MAC frame transmission with checksum offload with the
4858  * exception that the maximum TCP segment size (MSS) must be specified to
4859  * allow the chip to segment the data into legal sized frames.
4860  * The host also needs to calculate a pseudo-header checksum over the
4861  * following fields:
4862  * Source IP Address, Destination IP Address, and the Protocol.
4863  * The TCP length is not included in the pseudo-header calculation.
 * The pseudo-header checksum is placed in the TCP checksum field of the
4865  * prototype header.
4866  */
4867 static void
4868 ql_lso_pseudo_cksum(uint8_t *buf)
4869 {
4870 	uint32_t cksum;
4871 	uint16_t iphl;
4872 	uint16_t proto;
4873 
4874 	/*
4875 	 * Calculate the LSO pseudo-header checksum.
4876 	 */
4877 	iphl = (uint16_t)(4 * (buf[0] & 0xF));
4878 	cksum = proto = buf[9];
4879 	cksum += (((uint16_t)buf[12])<<8) + buf[13];
4880 	cksum += (((uint16_t)buf[14])<<8) + buf[15];
4881 	cksum += (((uint16_t)buf[16])<<8) + buf[17];
4882 	cksum += (((uint16_t)buf[18])<<8) + buf[19];
4883 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4884 	cksum = (cksum>>16) + (cksum & 0xFFFF);
4885 
4886 	/*
	 * Advance to the checksum field of the TCP/UDP header and
	 * store the pseudo-header checksum there.
4889 	 */
4890 	buf += iphl + ((proto == IPPROTO_TCP) ?
4891 	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);
4892 
4893 	*(uint16_t *)(void *)buf = (uint16_t)htons((uint16_t)cksum);
4894 }
4895 
4896 /*
 * For IPv4 packets, distribute the tx packets evenly among the tx rings
4898  */
4899 typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
4900 typedef	uint8_t		ub1;
4901 
4902 #define	hashsize(n)	((ub4)1<<(n))
4903 #define	hashmask(n)	(hashsize(n)-1)
4904 
4905 #define	mix(a, b, c) \
4906 { \
4907 	a -= b; a -= c; a ^= (c>>13); \
4908 	b -= c; b -= a; b ^= (a<<8); \
4909 	c -= a; c -= b; c ^= (b>>13); \
4910 	a -= b; a -= c; a ^= (c>>12);  \
4911 	b -= c; b -= a; b ^= (a<<16); \
4912 	c -= a; c -= b; c ^= (b>>5); \
4913 	a -= b; a -= c; a ^= (c>>3);  \
4914 	b -= c; b -= a; b ^= (a<<10); \
4915 	c -= a; c -= b; c ^= (b>>15); \
4916 }
4917 
4918 ub4
hash(ub1 *k,		/* the key */
    ub4 length,		/* the length of the key */
    ub4 initval)	/* the previous hash, or an arbitrary value */
{
	ub4 a, b, c, len;
4925 
4926 	/* Set up the internal state */
4927 	len = length;
4928 	a = b = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
4929 	c = initval;		/* the previous hash value */
4930 
4931 	/* handle most of the key */
4932 	while (len >= 12) {
4933 		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
4934 		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
4935 		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
4936 		mix(a, b, c);
4937 		k += 12;
4938 		len -= 12;
4939 	}
4940 
4941 	/* handle the last 11 bytes */
4942 	c += length;
4943 	/* all the case statements fall through */
4944 	switch (len) {
4945 		/* FALLTHRU */
4946 	case 11: c += ((ub4)k[10]<<24);
4947 		/* FALLTHRU */
4948 	case 10: c += ((ub4)k[9]<<16);
4949 		/* FALLTHRU */
4950 	case 9 : c += ((ub4)k[8]<<8);
4951 	/* the first byte of c is reserved for the length */
4952 		/* FALLTHRU */
4953 	case 8 : b += ((ub4)k[7]<<24);
4954 		/* FALLTHRU */
4955 	case 7 : b += ((ub4)k[6]<<16);
4956 		/* FALLTHRU */
4957 	case 6 : b += ((ub4)k[5]<<8);
4958 		/* FALLTHRU */
4959 	case 5 : b += k[4];
4960 		/* FALLTHRU */
4961 	case 4 : a += ((ub4)k[3]<<24);
4962 		/* FALLTHRU */
4963 	case 3 : a += ((ub4)k[2]<<16);
4964 		/* FALLTHRU */
4965 	case 2 : a += ((ub4)k[1]<<8);
4966 		/* FALLTHRU */
4967 	case 1 : a += k[0];
4968 	/* case 0: nothing left to add */
4969 	}
4970 	mix(a, b, c);
4971 	/* report the result */
4972 	return (c);
4973 }
4974 
4975 uint8_t
4976 ql_tx_hashing(qlge_t *qlge, caddr_t bp)
4977 {
4978 	struct ip *iphdr = NULL;
4979 	struct ether_header *ethhdr;
4980 	struct ether_vlan_header *ethvhdr;
4981 	struct tcphdr *tcp_hdr;
4982 	struct udphdr *udp_hdr;
4983 	uint32_t etherType;
4984 	int mac_hdr_len, ip_hdr_len;
4985 	uint32_t h = 0; /* 0 by default */
4986 	uint8_t tx_ring_id = 0;
4987 	uint32_t ip_src_addr = 0;
4988 	uint32_t ip_desc_addr = 0;
4989 	uint16_t src_port = 0;
4990 	uint16_t dest_port = 0;
4991 	uint8_t key[12];
4992 	QL_PRINT(DBG_TX, ("%s(%d) entered \n", __func__, qlge->instance));
4993 
4994 	ethhdr = (struct ether_header *)((void *)bp);
4995 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
4996 
4997 	if (qlge->tx_ring_count == 1)
4998 		return (tx_ring_id);
4999 
5000 	/* Is this vlan packet? */
5001 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5002 		mac_hdr_len = sizeof (struct ether_vlan_header);
5003 		etherType = ntohs(ethvhdr->ether_type);
5004 	} else {
5005 		mac_hdr_len = sizeof (struct ether_header);
5006 		etherType = ntohs(ethhdr->ether_type);
5007 	}
5008 	/* Is this IPv4 or IPv6 packet? */
5009 	if (etherType == ETHERTYPE_IP /* 0800 */) {
5010 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
5011 		    == IPV4_VERSION) {
5012 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5013 		}
5014 		if (((unsigned long)iphdr) & 0x3) {
5015 			/*  IP hdr not 4-byte aligned */
5016 			return (tx_ring_id);
5017 		}
5018 	}
	/* IPv4 packets */
5020 	if (iphdr) {
5021 
5022 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
5023 		ip_src_addr = iphdr->ip_src.s_addr;
5024 		ip_desc_addr = iphdr->ip_dst.s_addr;
5025 
5026 		if (iphdr->ip_p == IPPROTO_TCP) {
5027 			tcp_hdr = (struct tcphdr *)(void *)
5028 			    ((uint8_t *)iphdr + ip_hdr_len);
5029 			src_port = tcp_hdr->th_sport;
5030 			dest_port = tcp_hdr->th_dport;
5031 		} else if (iphdr->ip_p == IPPROTO_UDP) {
5032 			udp_hdr = (struct udphdr *)(void *)
5033 			    ((uint8_t *)iphdr + ip_hdr_len);
5034 			src_port = udp_hdr->uh_sport;
5035 			dest_port = udp_hdr->uh_dport;
5036 		}
5037 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
5038 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
5039 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
5040 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
5041 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
5042 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
5043 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
5044 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
5045 		key[8] = (uint8_t)((src_port) &0xFF);
5046 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
5047 		key[10] = (uint8_t)((dest_port) &0xFF);
5048 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
5049 		h = hash(key, 12, 0); /* return 32 bit */
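		/*
		 * Masking with (tx_ring_count - 1) spreads flows evenly
		 * only when the ring count is a power of two; the range
		 * check below is purely defensive.
		 */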
5050 		tx_ring_id = (h & (qlge->tx_ring_count - 1));
5051 		if (tx_ring_id >= qlge->tx_ring_count) {
5052 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
5053 			    __func__, tx_ring_id);
5054 			tx_ring_id = 0;
5055 		}
5056 	}
5057 	return (tx_ring_id);
5058 }
5059 
5060 /*
5061  * Tell the hardware to do Large Send Offload (LSO)
5062  *
 * Some fields in the ob_mac_iocb need to be set so the hardware knows
 * what kind of packet it is handling (TCP or UDP), whether a VLAN tag
 * needs to be inserted in the right place, etc., so that the hardware
 * can process the packet correctly.
5067  */
5068 static void
5069 ql_hw_lso_setup(qlge_t *qlge, uint32_t mss, caddr_t bp,
5070     struct ob_mac_iocb_req *mac_iocb_ptr)
5071 {
5072 	struct ip *iphdr = NULL;
5073 	struct ether_header *ethhdr;
5074 	struct ether_vlan_header *ethvhdr;
5075 	struct tcphdr *tcp_hdr;
5076 	struct udphdr *udp_hdr;
5077 	uint32_t etherType;
5078 	uint16_t mac_hdr_len, ip_hdr_len, tcp_udp_hdr_len;
5079 	uint16_t ip_hdr_off, tcp_udp_hdr_off, hdr_off;
5080 
5081 	ethhdr = (struct ether_header *)(void *)bp;
5082 	ethvhdr = (struct ether_vlan_header *)(void *)bp;
5083 
5084 	/* Is this vlan packet? */
5085 	if (ntohs(ethvhdr->ether_tpid) == ETHERTYPE_VLAN) {
5086 		mac_hdr_len = sizeof (struct ether_vlan_header);
5087 		etherType = ntohs(ethvhdr->ether_type);
5088 	} else {
5089 		mac_hdr_len = sizeof (struct ether_header);
5090 		etherType = ntohs(ethhdr->ether_type);
5091 	}
5092 	/* Is this IPv4 or IPv6 packet? */
5093 	if (IPH_HDR_VERSION((ipha_t *)(void *)(bp + mac_hdr_len)) ==
5094 	    IPV4_VERSION) {
5095 		if (etherType == ETHERTYPE_IP /* 0800 */) {
			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
5097 		} else {
5098 			/* EMPTY */
5099 			QL_PRINT(DBG_TX, ("%s(%d) : IPv4 None IP packet"
5100 			    " type 0x%x\n",
5101 			    __func__, qlge->instance, etherType));
5102 		}
5103 	}
5104 
	if (iphdr != NULL) { /* IPv4 packets */
5106 		ip_hdr_len = (uint16_t)IPH_HDR_LENGTH(iphdr);
5107 		QL_PRINT(DBG_TX,
5108 		    ("%s(%d) : IPv4 header length using IPH_HDR_LENGTH: %d"
5109 		    " bytes \n", __func__, qlge->instance, ip_hdr_len));
5110 
5111 		ip_hdr_off = mac_hdr_len;
5112 		QL_PRINT(DBG_TX, ("%s(%d) : ip_hdr_len=%d\n",
5113 		    __func__, qlge->instance, ip_hdr_len));
5114 
5115 		mac_iocb_ptr->flag0 = (uint8_t)(mac_iocb_ptr->flag0 |
5116 		    OB_MAC_IOCB_REQ_IPv4);
5117 		if (qlge->cfg_flags & CFG_CKSUM_FULL_IPv4) {
5118 			if (iphdr->ip_p == IPPROTO_TCP) {
5119 				tcp_hdr = (struct tcphdr *)(void *)
5120 				    ((uint8_t *)(void *)iphdr +
5121 				    ip_hdr_len);
5122 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on TCP "
5123 				    "packet\n",
5124 				    __func__, qlge->instance));
5125 				mac_iocb_ptr->opcode =
5126 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5127 				mac_iocb_ptr->flag1 =
5128 				    (uint8_t)(mac_iocb_ptr->flag1 |
5129 				    OB_MAC_IOCB_REQ_LSO);
5130 				iphdr->ip_sum = 0;
5131 				tcp_udp_hdr_off =
5132 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5133 				tcp_udp_hdr_len =
5134 				    (uint16_t)(tcp_hdr->th_off*4);
5135 				QL_PRINT(DBG_TX, ("%s(%d): tcp header len:%d\n",
5136 				    __func__, qlge->instance, tcp_udp_hdr_len));
5137 				hdr_off = ip_hdr_off;
5138 				tcp_udp_hdr_off <<= 6;
5139 				hdr_off |= tcp_udp_hdr_off;
5140 				mac_iocb_ptr->hdr_off =
5141 				    (uint16_t)cpu_to_le16(hdr_off);
5142 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5143 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5144 				    tcp_udp_hdr_len);
5145 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5146 
5147 				/*
				 * if the chip is unable to calculate the
				 * pseudo header checksum, do it here, then
				 * put the result into the data passed to the
				 * chip
5151 				 */
5152 				if (qlge->cfg_flags &
5153 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5154 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5155 			} else if (iphdr->ip_p == IPPROTO_UDP) {
5156 				udp_hdr = (struct udphdr *)(void *)
5157 				    ((uint8_t *)(void *)iphdr
5158 				    + ip_hdr_len);
5159 				QL_PRINT(DBG_TX, ("%s(%d) : Do TSO on UDP "
5160 				    "packet\n",
5161 				    __func__, qlge->instance));
5162 				mac_iocb_ptr->opcode =
5163 				    OPCODE_OB_MAC_OFFLOAD_IOCB;
5164 				mac_iocb_ptr->flag1 =
5165 				    (uint8_t)(mac_iocb_ptr->flag1 |
5166 				    OB_MAC_IOCB_REQ_LSO);
5167 				iphdr->ip_sum = 0;
5168 				tcp_udp_hdr_off =
5169 				    (uint16_t)(mac_hdr_len+ip_hdr_len);
5170 				tcp_udp_hdr_len =
5171 				    (uint16_t)(udp_hdr->uh_ulen*4);
5172 				QL_PRINT(DBG_TX, ("%s(%d):udp header len:%d\n",
5173 				    __func__, qlge->instance, tcp_udp_hdr_len));
5174 				hdr_off = ip_hdr_off;
5175 				tcp_udp_hdr_off <<= 6;
5176 				hdr_off |= tcp_udp_hdr_off;
5177 				mac_iocb_ptr->hdr_off =
5178 				    (uint16_t)cpu_to_le16(hdr_off);
5179 				mac_iocb_ptr->protocol_hdr_len = (uint16_t)
5180 				    cpu_to_le16(mac_hdr_len + ip_hdr_len +
5181 				    tcp_udp_hdr_len);
5182 				mac_iocb_ptr->mss = (uint16_t)cpu_to_le16(mss);
5183 
5184 				/*
				 * if the chip is unable to do the pseudo
				 * header checksum calculation, do it here,
				 * then put the result into the data passed
				 * to the chip
5188 				 */
5189 				if (qlge->cfg_flags &
5190 				    CFG_HW_UNABLE_PSEUDO_HDR_CKSUM)
5191 					ql_lso_pseudo_cksum((uint8_t *)iphdr);
5192 			}
5193 		}
5194 	}
5195 }
5196 
5197 /*
 * Generic packet-sending function, used to transmit a single packet.
5199  */
5200 int
5201 ql_send_common(struct tx_ring *tx_ring, mblk_t *mp)
5202 {
5203 	struct tx_ring_desc *tx_cb;
5204 	struct ob_mac_iocb_req *mac_iocb_ptr;
5205 	mblk_t *tp;
5206 	size_t msg_len = 0;
5207 	size_t off;
5208 	caddr_t bp;
5209 	size_t nbyte, total_len;
5210 	uint_t i = 0;
5211 	int j = 0, frags = 0;
5212 	uint32_t phy_addr_low, phy_addr_high;
5213 	uint64_t phys_addr;
5214 	clock_t now;
5215 	uint32_t pflags = 0;
5216 	uint32_t mss = 0;
5217 	enum tx_mode_t tx_mode;
5218 	struct oal_entry *oal_entry;
5219 	int status;
5220 	uint_t ncookies, oal_entries, max_oal_entries;
5221 	size_t max_seg_len = 0;
5222 	boolean_t use_lso = B_FALSE;
5223 	struct oal_entry *tx_entry = NULL;
5224 	struct oal_entry *last_oal_entry;
5225 	qlge_t *qlge = tx_ring->qlge;
5226 	ddi_dma_cookie_t dma_cookie;
5227 	size_t tx_buf_len = QL_MAX_COPY_LENGTH;
5228 	int force_pullup = 0;
5229 
5230 	tp = mp;
5231 	total_len = msg_len = 0;
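	/*
	 * The IOCB holds TX_DESC_PER_IOCB address/length pairs inline; when
	 * more are needed, the last inline entry is turned into a pointer
	 * to the external OAL, so one inline slot is traded for up to
	 * MAX_SG_ELEMENTS external entries.
	 */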
5232 	max_oal_entries = TX_DESC_PER_IOCB + MAX_SG_ELEMENTS-1;
5233 
5234 	/* Calculate number of data and segments in the incoming message */
5235 	for (tp = mp; tp != NULL; tp = tp->b_cont) {
5236 		nbyte = MBLKL(tp);
5237 		total_len += nbyte;
5238 		max_seg_len = max(nbyte, max_seg_len);
5239 		QL_PRINT(DBG_TX, ("Requested sending data in %d segments, "
5240 		    "total length: %d\n", frags, nbyte));
5241 		frags++;
5242 	}
5243 
5244 	if (total_len >= QL_LSO_MAX) {
5245 		freemsg(mp);
5246 #ifdef QLGE_LOAD_UNLOAD
5247 		cmn_err(CE_NOTE, "%s: quit, packet oversize %d\n",
5248 		    __func__, (int)total_len);
5249 #endif
		return (0);
5251 	}
5252 
5253 	bp = (caddr_t)mp->b_rptr;
5254 	if (bp[0] & 1) {
5255 		if (bcmp(bp, ql_ether_broadcast_addr.ether_addr_octet,
5256 		    ETHERADDRL) == 0) {
5257 			QL_PRINT(DBG_TX, ("Broadcast packet\n"));
5258 			tx_ring->brdcstxmt++;
5259 		} else {
5260 			QL_PRINT(DBG_TX, ("multicast packet\n"));
5261 			tx_ring->multixmt++;
5262 		}
5263 	}
5264 
5265 	tx_ring->obytes += total_len;
	tx_ring->opackets++;
5267 
5268 	QL_PRINT(DBG_TX, ("total requested sending data length: %d, in %d segs,"
5269 	    " max seg len: %d\n", total_len, frags, max_seg_len));
5270 
5271 	/* claim a free slot in tx ring */
5272 	tx_cb = &tx_ring->wq_desc[tx_ring->prod_idx];
5273 
5274 	/* get the tx descriptor */
5275 	mac_iocb_ptr = tx_cb->queue_entry;
5276 
5277 	bzero((void *)mac_iocb_ptr, 20);
5278 
5279 	ASSERT(tx_cb->mp == NULL);
5280 
5281 	/*
5282 	 * Decide to use DMA map or copy mode.
5283 	 * DMA map mode must be used when the total msg length is more than the
5284 	 * tx buffer length.
5285 	 */
5286 
5287 	if (total_len > tx_buf_len)
5288 		tx_mode = USE_DMA;
5289 	else if	(max_seg_len > QL_MAX_COPY_LENGTH)
5290 		tx_mode = USE_DMA;
5291 	else
5292 		tx_mode = USE_COPY;
5293 
5294 	if (qlge->chksum_cap) {
5295 		mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
5296 		QL_PRINT(DBG_TX, ("checksum flag is :0x%x, card capability "
5297 		    "is 0x%x \n", pflags, qlge->chksum_cap));
5298 		if (qlge->lso_enable) {
5299 			uint32_t lso_flags = 0;
5300 			mac_lso_get(mp, &mss, &lso_flags);
5301 			use_lso = (lso_flags == HW_LSO);
5302 		}
5303 		QL_PRINT(DBG_TX, ("mss :%d, use_lso %x \n",
5304 		    mss, use_lso));
5305 	}
5306 
5307 do_pullup:
5308 
5309 	/* concatenate all frags into one large packet if too fragmented */
	if (((tx_mode == USE_DMA) && (frags > QL_MAX_TX_DMA_HANDLES)) ||
5311 	    force_pullup) {
5312 		mblk_t *mp1;
5313 		if ((mp1 = msgpullup(mp, -1)) != NULL) {
5314 			freemsg(mp);
5315 			mp = mp1;
5316 			frags = 1;
5317 		} else {
5318 			tx_ring->tx_fail_dma_bind++;
5319 			goto bad;
5320 		}
5321 	}
5322 
5323 	tx_cb->tx_bytes = (uint32_t)total_len;
5324 	tx_cb->mp = mp;
5325 	tx_cb->tx_dma_handle_used = 0;
5326 
5327 	if (tx_mode == USE_DMA) {
5328 		msg_len = total_len;
5329 
5330 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5331 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5332 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5333 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5334 
5335 		tx_entry = &mac_iocb_ptr->oal_entry[0];
5336 		oal_entry = NULL;
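		/*
		 * tx_entry walks the inline entries in the IOCB first;
		 * oal_entry stays NULL until the descriptors overflow into
		 * the external OAL list below.
		 */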
5337 
5338 		for (tp = mp, oal_entries = j = 0; tp != NULL;
5339 		    tp = tp->b_cont) {
5340 			/* if too many tx dma handles needed */
5341 			if (j >= QL_MAX_TX_DMA_HANDLES) {
5342 				tx_ring->tx_no_dma_handle++;
5343 				if (!force_pullup) {
5344 					force_pullup = 1;
5345 					goto do_pullup;
5346 				} else {
5347 					goto bad;
5348 				}
5349 			}
5350 			nbyte = (uint16_t)MBLKL(tp);
5351 			if (nbyte == 0)
5352 				continue;
5353 
5354 			status = ddi_dma_addr_bind_handle(
5355 			    tx_cb->tx_dma_handle[j], NULL,
5356 			    (caddr_t)tp->b_rptr, nbyte,
5357 			    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
5358 			    0, &dma_cookie, &ncookies);
5359 
5360 			QL_PRINT(DBG_TX, ("map sending data segment: %d, "
5361 			    "length: %d, spans in %d cookies\n",
5362 			    j, nbyte, ncookies));
5363 
5364 			if (status != DDI_DMA_MAPPED) {
5365 				goto bad;
5366 			}
5367 			/*
5368 			 * Each fragment can span several cookies. One cookie
5369 			 * will use one tx descriptor to transmit.
5370 			 */
5371 			for (i = ncookies; i > 0; i--, tx_entry++,
5372 			    oal_entries++) {
5373 				/*
5374 				 * The number of TX descriptors that can be
5375 				 *  saved in tx iocb and oal list is limited
5376 				 */
5377 				if (oal_entries > max_oal_entries) {
5378 					tx_ring->tx_no_dma_cookie++;
5379 					if (!force_pullup) {
5380 						force_pullup = 1;
5381 						goto do_pullup;
5382 					} else {
5383 						goto bad;
5384 					}
5385 				}
5386 
5387 				if ((oal_entries == TX_DESC_PER_IOCB) &&
5388 				    !oal_entry) {
5389 					/*
5390 					 * Time to switch to an oal list
5391 					 * The last entry should be copied
5392 					 * to first entry in the oal list
5393 					 */
5394 					oal_entry = tx_cb->oal;
5395 					tx_entry =
5396 					    &mac_iocb_ptr->oal_entry[
5397 					    TX_DESC_PER_IOCB-1];
5398 					bcopy(tx_entry, oal_entry,
5399 					    sizeof (*oal_entry));
5400 
5401 					/*
5402 					 * last entry should be updated to
5403 					 * point to the extended oal list itself
5404 					 */
5405 					tx_entry->buf_addr_low =
5406 					    cpu_to_le32(
5407 					    LS_64BITS(tx_cb->oal_dma_addr));
5408 					tx_entry->buf_addr_high =
5409 					    cpu_to_le32(
5410 					    MS_64BITS(tx_cb->oal_dma_addr));
5411 					/*
5412 					 * Point tx_entry to the oal list
5413 					 * second entry
5414 					 */
5415 					tx_entry = &oal_entry[1];
5416 				}
5417 
5418 				tx_entry->buf_len =
5419 				    (uint32_t)cpu_to_le32(dma_cookie.dmac_size);
5420 				phys_addr = dma_cookie.dmac_laddress;
5421 				tx_entry->buf_addr_low =
5422 				    cpu_to_le32(LS_64BITS(phys_addr));
5423 				tx_entry->buf_addr_high =
5424 				    cpu_to_le32(MS_64BITS(phys_addr));
5425 
5426 				last_oal_entry = tx_entry;
5427 
5428 				if (i > 1)
5429 					ddi_dma_nextcookie(
5430 					    tx_cb->tx_dma_handle[j],
5431 					    &dma_cookie);
5432 			}
5433 			j++;
5434 		}
5435 		/*
5436 		 * if OAL is used, the last oal entry in tx iocb indicates
5437 		 * number of additional address/len pairs in OAL
5438 		 */
5439 		if (oal_entries > TX_DESC_PER_IOCB) {
5440 			tx_entry = &mac_iocb_ptr->oal_entry[TX_DESC_PER_IOCB-1];
			tx_entry->buf_len = (uint32_t)cpu_to_le32(
			    (sizeof (struct oal_entry) *
			    (oal_entries - TX_DESC_PER_IOCB + 1)) |
			    OAL_CONT_ENTRY);
5444 		}
5445 		last_oal_entry->buf_len = cpu_to_le32(
5446 		    le32_to_cpu(last_oal_entry->buf_len)|OAL_LAST_ENTRY);
5447 
5448 		tx_cb->tx_dma_handle_used = j;
5449 		QL_PRINT(DBG_TX, ("total tx_dma_handle_used %d cookies %d \n",
5450 		    j, oal_entries));
5451 
5452 		bp = (caddr_t)mp->b_rptr;
5453 	}
5454 	if (tx_mode == USE_COPY) {
5455 		bp = tx_cb->copy_buffer;
5456 		off = 0;
5457 		nbyte = 0;
5458 		frags = 0;
5459 		/*
5460 		 * Copy up to tx_buf_len of the transmit data
5461 		 * from mp to tx buffer
5462 		 */
5463 		for (tp = mp; tp != NULL; tp = tp->b_cont) {
5464 			nbyte = MBLKL(tp);
5465 			if ((off + nbyte) <= tx_buf_len) {
5466 				bcopy(tp->b_rptr, &bp[off], nbyte);
5467 				off += nbyte;
				frags++;
5469 			}
5470 		}
5471 
5472 		msg_len = off;
5473 
5474 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
5475 		mac_iocb_ptr->tid = tx_ring->prod_idx;
5476 		mac_iocb_ptr->frame_len = (uint32_t)cpu_to_le32(msg_len);
5477 		mac_iocb_ptr->txq_idx = tx_ring->wq_id;
5478 
5479 		QL_PRINT(DBG_TX, ("Copy Mode:actual sent data length is: %d, "
5480 		    "from %d segaments\n", msg_len, frags));
5481 
5482 		phys_addr = tx_cb->copy_buffer_dma_addr;
5483 		phy_addr_low = cpu_to_le32(LS_64BITS(phys_addr));
5484 		phy_addr_high = cpu_to_le32(MS_64BITS(phys_addr));
5485 
5486 		QL_DUMP(DBG_TX, "\t requested sending data:\n",
5487 		    (uint8_t *)tx_cb->copy_buffer, 8, total_len);
5488 
5489 		mac_iocb_ptr->oal_entry[0].buf_len = (uint32_t)
5490 		    cpu_to_le32(msg_len | OAL_LAST_ENTRY);
5491 		mac_iocb_ptr->oal_entry[0].buf_addr_low  = phy_addr_low;
5492 		mac_iocb_ptr->oal_entry[0].buf_addr_high = phy_addr_high;
5493 
5494 		freemsg(mp); /* no need, we have copied */
5495 		tx_cb->mp = NULL;
5496 	} /* End of Copy Mode */
5497 
5498 	/* Do TSO/LSO on TCP packet? */
5499 	if (use_lso && mss) {
5500 		ql_hw_lso_setup(qlge, mss, bp, mac_iocb_ptr);
5501 	} else if (pflags & qlge->chksum_cap) {
5502 		/* Do checksum offloading */
5503 		ql_hw_csum_setup(qlge, pflags, bp, mac_iocb_ptr);
5504 	}
5505 
5506 	/* let device know the latest outbound IOCB */
5507 	(void) ddi_dma_sync(tx_ring->wq_dma.dma_handle,
5508 	    (off_t)((uintptr_t)mac_iocb_ptr - (uintptr_t)tx_ring->wq_dma.vaddr),
5509 	    (size_t)sizeof (*mac_iocb_ptr), DDI_DMA_SYNC_FORDEV);
5510 
5511 	if (tx_mode == USE_DMA) {
5512 		/* let device know the latest outbound OAL if necessary */
5513 		if (oal_entries > TX_DESC_PER_IOCB) {
5514 			(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5515 			    (off_t)0,
5516 			    (sizeof (struct oal_entry) *
			    (oal_entries - TX_DESC_PER_IOCB + 1)),
5518 			    DDI_DMA_SYNC_FORDEV);
5519 		}
5520 	} else { /* for USE_COPY mode, tx buffer has changed */
5521 		/* let device know the latest change */
5522 		(void) ddi_dma_sync(tx_cb->oal_dma.dma_handle,
5523 		/* copy buf offset */
5524 		    (off_t)(sizeof (oal_entry) * MAX_SG_ELEMENTS),
5525 		    msg_len, DDI_DMA_SYNC_FORDEV);
5526 	}
5527 
5528 	/* save how the packet was sent */
5529 	tx_cb->tx_type = tx_mode;
5530 
5531 	QL_DUMP_REQ_PKT(qlge, mac_iocb_ptr, tx_cb->oal, oal_entries);
5532 	/* reduce the number of available tx slot */
5533 	atomic_dec_32(&tx_ring->tx_free_count);
5534 
5535 	tx_ring->prod_idx++;
5536 	if (tx_ring->prod_idx >= tx_ring->wq_len)
5537 		tx_ring->prod_idx = 0;
5538 
5539 	now = ddi_get_lbolt();
5540 	qlge->last_tx_time = now;
5541 
5542 	return (DDI_SUCCESS);
5543 
5544 bad:
5545 	/*
5546 	 * if for any reason driver can not send, delete
5547 	 * the message pointer, mp
5548 	 */
5549 	now = ddi_get_lbolt();
5550 	freemsg(mp);
5551 	mp = NULL;
5552 	tx_cb->mp = NULL;
5553 	for (i = 0; i < j; i++)
5554 		(void) ddi_dma_unbind_handle(tx_cb->tx_dma_handle[i]);
5555 
5556 	QL_PRINT(DBG_TX, ("%s(%d) failed at 0x%x",
5557 	    __func__, qlge->instance, (int)now));
5558 
5559 	return (DDI_SUCCESS);
5560 }
5561 
5562 
5563 /*
5564  * Initializes hardware and driver software flags before the driver
5565  * is finally ready to work.
5566  */
5567 int
5568 ql_do_start(qlge_t *qlge)
5569 {
5570 	int i;
5571 	struct rx_ring *rx_ring;
5572 	uint16_t lbq_buf_size;
5573 	int rings_done;
5574 
5575 	ASSERT(qlge != NULL);
5576 
5577 	mutex_enter(&qlge->hw_mutex);
5578 
5579 	/* Reset adapter */
5580 	(void) ql_asic_reset(qlge);
5581 
5582 	lbq_buf_size = (uint16_t)
5583 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
5584 	if (qlge->rx_ring[0].lbq_buf_size != lbq_buf_size) {
5585 #ifdef QLGE_LOAD_UNLOAD
5586 		cmn_err(CE_NOTE, "realloc buffers old: %d new: %d\n",
5587 		    qlge->rx_ring[0].lbq_buf_size, lbq_buf_size);
5588 #endif
5589 		/*
5590 		 * Check if any ring has buffers still with upper layers
5591 		 * If buffers are pending with upper layers, we use the
5592 		 * existing buffers and don't reallocate new ones
5593 		 * Unfortunately there is no way to evict buffers from
5594 		 * upper layers. Using buffers with the current size may
5595 		 * cause slightly sub-optimal performance, but that seems
5596 		 * to be the easiest way to handle this situation.
5597 		 */
5598 		rings_done = 0;
5599 		for (i = 0; i < qlge->rx_ring_count; i++) {
5600 			rx_ring = &qlge->rx_ring[i];
5601 			if (rx_ring->rx_indicate == 0)
5602 				rings_done++;
5603 			else
5604 				break;
5605 		}
5606 		/*
5607 		 * No buffers pending with upper layers;
5608 		 * reallocte them for new MTU size
5609 		 */
5610 		if (rings_done >= qlge->rx_ring_count) {
5611 			/* free large buffer pool */
5612 			for (i = 0; i < qlge->rx_ring_count; i++) {
5613 				rx_ring = &qlge->rx_ring[i];
5614 				if (rx_ring->type != TX_Q) {
5615 					ql_free_sbq_buffers(rx_ring);
5616 					ql_free_lbq_buffers(rx_ring);
5617 				}
5618 			}
5619 			/* reallocate large buffer pool */
5620 			for (i = 0; i < qlge->rx_ring_count; i++) {
5621 				rx_ring = &qlge->rx_ring[i];
5622 				if (rx_ring->type != TX_Q) {
5623 					(void) ql_alloc_sbufs(qlge, rx_ring);
5624 					(void) ql_alloc_lbufs(qlge, rx_ring);
5625 				}
5626 			}
5627 		}
5628 	}
5629 
5630 	if (ql_bringup_adapter(qlge) != DDI_SUCCESS) {
5631 		cmn_err(CE_WARN, "qlge bringup adapter failed");
5632 		mutex_exit(&qlge->hw_mutex);
5633 		if (qlge->fm_enable) {
5634 			atomic_or_32(&qlge->flags, ADAPTER_ERROR);
5635 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_LOST);
5636 		}
5637 		return (DDI_FAILURE);
5638 	}
5639 
5640 	mutex_exit(&qlge->hw_mutex);
5641 	/* if adapter is up successfully but was bad before */
5642 	if (qlge->flags & ADAPTER_ERROR) {
5643 		atomic_and_32(&qlge->flags, ~ADAPTER_ERROR);
5644 		if (qlge->fm_enable) {
5645 			ddi_fm_service_impact(qlge->dip, DDI_SERVICE_RESTORED);
5646 		}
5647 	}
5648 
5649 	/* Get current link state */
5650 	qlge->port_link_state = ql_get_link_state(qlge);
5651 
5652 	if (qlge->port_link_state == LS_UP) {
5653 		QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5654 		    __func__, qlge->instance));
5655 		/* If driver detects a carrier on */
5656 		CARRIER_ON(qlge);
5657 	} else {
5658 		QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5659 		    __func__, qlge->instance));
5660 		/* If driver detects a lack of carrier */
5661 		CARRIER_OFF(qlge);
5662 	}
5663 	qlge->mac_flags = QL_MAC_STARTED;
5664 	return (DDI_SUCCESS);
5665 }
5666 
5667 /*
5668  * Stop currently running driver
5669  * Driver needs to stop routing new packets to driver and wait until
5670  * all pending tx/rx buffers to be free-ed.
5671  */
5672 int
5673 ql_do_stop(qlge_t *qlge)
5674 {
5675 	int rc = DDI_FAILURE;
5676 	uint32_t i, j, k;
5677 	struct bq_desc *sbq_desc, *lbq_desc;
5678 	struct rx_ring *rx_ring;
5679 
5680 	ASSERT(qlge != NULL);
5681 
5682 	CARRIER_OFF(qlge);
5683 
5684 	rc = ql_bringdown_adapter(qlge);
5685 	if (rc != DDI_SUCCESS) {
5686 		cmn_err(CE_WARN, "qlge bringdown adapter failed.");
	} else {
		rc = DDI_SUCCESS;
	}
5689 
5690 	for (k = 0; k < qlge->rx_ring_count; k++) {
5691 		rx_ring = &qlge->rx_ring[k];
5692 		if (rx_ring->type != TX_Q) {
5693 			j = rx_ring->lbq_use_head;
5694 #ifdef QLGE_LOAD_UNLOAD
5695 			cmn_err(CE_NOTE, "ring %d: move %d lbufs in use list"
5696 			    " to free list %d\n total %d\n",
5697 			    k, rx_ring->lbuf_in_use_count,
5698 			    rx_ring->lbuf_free_count,
5699 			    rx_ring->lbuf_in_use_count +
5700 			    rx_ring->lbuf_free_count);
5701 #endif
5702 			for (i = 0; i < rx_ring->lbuf_in_use_count; i++) {
5703 				lbq_desc = rx_ring->lbuf_in_use[j];
5704 				j++;
5705 				if (j >= rx_ring->lbq_len) {
5706 					j = 0;
5707 				}
5708 				if (lbq_desc->mp) {
5709 					atomic_inc_32(&rx_ring->rx_indicate);
5710 					freemsg(lbq_desc->mp);
5711 				}
5712 			}
5713 			rx_ring->lbq_use_head = j;
5714 			rx_ring->lbq_use_tail = j;
5715 			rx_ring->lbuf_in_use_count = 0;
5716 			j = rx_ring->sbq_use_head;
5717 #ifdef QLGE_LOAD_UNLOAD
5718 			cmn_err(CE_NOTE, "ring %d: move %d sbufs in use list,"
5719 			    " to free list %d\n total %d \n",
5720 			    k, rx_ring->sbuf_in_use_count,
5721 			    rx_ring->sbuf_free_count,
5722 			    rx_ring->sbuf_in_use_count +
5723 			    rx_ring->sbuf_free_count);
5724 #endif
5725 			for (i = 0; i < rx_ring->sbuf_in_use_count; i++) {
5726 				sbq_desc = rx_ring->sbuf_in_use[j];
5727 				j++;
5728 				if (j >= rx_ring->sbq_len) {
5729 					j = 0;
5730 				}
5731 				if (sbq_desc->mp) {
5732 					atomic_inc_32(&rx_ring->rx_indicate);
5733 					freemsg(sbq_desc->mp);
5734 				}
5735 			}
5736 			rx_ring->sbq_use_head = j;
5737 			rx_ring->sbq_use_tail = j;
5738 			rx_ring->sbuf_in_use_count = 0;
5739 		}
5740 	}
5741 
5742 	qlge->mac_flags = QL_MAC_STOPPED;
5743 
5744 	return (rc);
5745 }
5746 
5747 /*
5748  * Support
5749  */
5750 
5751 void
5752 ql_disable_isr(qlge_t *qlge)
5753 {
5754 	/*
5755 	 * disable the hardware interrupt
5756 	 */
5757 	ISP_DISABLE_GLOBAL_INTRS(qlge);
5758 
5759 	qlge->flags &= ~INTERRUPTS_ENABLED;
5760 }
5761 
5762 
5763 
5764 /*
5765  * busy wait for 'usecs' microseconds.
5766  */
5767 void
5768 qlge_delay(clock_t usecs)
5769 {
5770 	drv_usecwait(usecs);
5771 }
5772 
5773 /*
5774  * retrieve firmware details.
5775  */
5776 
5777 pci_cfg_t *
5778 ql_get_pci_config(qlge_t *qlge)
5779 {
5780 	return (&(qlge->pci_cfg));
5781 }
5782 
5783 /*
5784  * Get current Link status
5785  */
5786 static uint32_t
5787 ql_get_link_state(qlge_t *qlge)
5788 {
5789 	uint32_t bitToCheck = 0;
5790 	uint32_t temp, linkState;
5791 
5792 	if (qlge->func_number == qlge->fn0_net) {
5793 		bitToCheck = STS_PL0;
5794 	} else {
5795 		bitToCheck = STS_PL1;
5796 	}
5797 	temp = ql_read_reg(qlge, REG_STATUS);
5798 	QL_PRINT(DBG_GLD, ("%s(%d) chip status reg: 0x%x\n",
5799 	    __func__, qlge->instance, temp));
5800 
5801 	if (temp & bitToCheck) {
5802 		linkState = LS_UP;
5803 	} else {
5804 		linkState = LS_DOWN;
5805 	}
5806 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
		/* Schultz (8100): link speed is fixed at 10G, full duplex */
5808 		qlge->speed  = SPEED_10G;
5809 		qlge->duplex = 1;
5810 	}
5811 	return (linkState);
5812 }
5813 /*
5814  * Get current link status and report to OS
5815  */
5816 static void
5817 ql_get_and_report_link_state(qlge_t *qlge)
5818 {
5819 	uint32_t cur_link_state;
5820 
5821 	/* Get current link state */
5822 	cur_link_state = ql_get_link_state(qlge);
5823 	/* if link state has changed */
5824 	if (cur_link_state != qlge->port_link_state) {
5825 
5826 		qlge->port_link_state = cur_link_state;
5827 
5828 		if (qlge->port_link_state == LS_UP) {
5829 			QL_PRINT(DBG_GLD, ("%s(%d) Link UP !!\n",
5830 			    __func__, qlge->instance));
5831 			/* If driver detects a carrier on */
5832 			CARRIER_ON(qlge);
5833 		} else {
5834 			QL_PRINT(DBG_GLD, ("%s(%d) Link down\n",
5835 			    __func__, qlge->instance));
5836 			/* If driver detects a lack of carrier */
5837 			CARRIER_OFF(qlge);
5838 		}
5839 	}
5840 }
5841 
5842 /*
5843  * timer callback function executed after timer expires
5844  */
5845 static void
5846 ql_timer(void* arg)
5847 {
5848 	ql_get_and_report_link_state((qlge_t *)arg);
5849 }
5850 
5851 /*
5852  * stop the running timer if activated
5853  */
5854 static void
5855 ql_stop_timer(qlge_t *qlge)
5856 {
5857 	timeout_id_t timer_id;
5858 	/* Disable driver timer */
5859 	if (qlge->ql_timer_timeout_id != NULL) {
5860 		timer_id = qlge->ql_timer_timeout_id;
5861 		qlge->ql_timer_timeout_id = NULL;
5862 		(void) untimeout(timer_id);
5863 	}
5864 }
5865 
5866 /*
5867  * stop then restart timer
5868  */
5869 void
5870 ql_restart_timer(qlge_t *qlge)
5871 {
5872 	ql_stop_timer(qlge);
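	/* Poll the link state roughly four times per second. */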
5873 	qlge->ql_timer_ticks = TICKS_PER_SEC / 4;
5874 	qlge->ql_timer_timeout_id = timeout(ql_timer,
5875 	    (void *)qlge, qlge->ql_timer_ticks);
5876 }
5877 
5878 /* ************************************************************************* */
5879 /*
5880  *		Hardware K-Stats Data Structures and Subroutines
5881  */
5882 /* ************************************************************************* */
5883 static const ql_ksindex_t ql_kstats_hw[] = {
5884 	/* PCI related hardware information */
5885 	{ 0, "Vendor Id"			},
5886 	{ 1, "Device Id"			},
5887 	{ 2, "Command"				},
5888 	{ 3, "Status"				},
5889 	{ 4, "Revision Id"			},
5890 	{ 5, "Cache Line Size"			},
5891 	{ 6, "Latency Timer"			},
5892 	{ 7, "Header Type"			},
5893 	{ 9, "I/O base addr"			},
5894 	{ 10, "Control Reg Base addr low"	},
5895 	{ 11, "Control Reg Base addr high"	},
5896 	{ 12, "Doorbell Reg Base addr low"	},
5897 	{ 13, "Doorbell Reg Base addr high"	},
5898 	{ 14, "Subsystem Vendor Id"		},
5899 	{ 15, "Subsystem Device ID"		},
5900 	{ 16, "PCIe Device Control"		},
5901 	{ 17, "PCIe Link Status"		},
5902 
5903 	{ -1,	NULL				},
5904 };
5905 
5906 /*
5907  * kstat update function for PCI registers
5908  */
5909 static int
5910 ql_kstats_get_pci_regs(kstat_t *ksp, int flag)
5911 {
5912 	qlge_t *qlge;
5913 	kstat_named_t *knp;
5914 
5915 	if (flag != KSTAT_READ)
5916 		return (EACCES);
5917 
5918 	qlge = ksp->ks_private;
5919 	knp = ksp->ks_data;
5920 	(knp++)->value.ui32 = qlge->pci_cfg.vendor_id;
5921 	(knp++)->value.ui32 = qlge->pci_cfg.device_id;
5922 	(knp++)->value.ui32 = qlge->pci_cfg.command;
5923 	(knp++)->value.ui32 = qlge->pci_cfg.status;
5924 	(knp++)->value.ui32 = qlge->pci_cfg.revision;
5925 	(knp++)->value.ui32 = qlge->pci_cfg.cache_line_size;
5926 	(knp++)->value.ui32 = qlge->pci_cfg.latency_timer;
5927 	(knp++)->value.ui32 = qlge->pci_cfg.header_type;
5928 	(knp++)->value.ui32 = qlge->pci_cfg.io_base_address;
5929 	(knp++)->value.ui32 =
5930 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_lower;
5931 	(knp++)->value.ui32 =
5932 	    qlge->pci_cfg.pci_cntl_reg_set_mem_base_address_upper;
5933 	(knp++)->value.ui32 =
5934 	    qlge->pci_cfg.pci_doorbell_mem_base_address_lower;
5935 	(knp++)->value.ui32 =
5936 	    qlge->pci_cfg.pci_doorbell_mem_base_address_upper;
5937 	(knp++)->value.ui32 = qlge->pci_cfg.sub_vendor_id;
5938 	(knp++)->value.ui32 = qlge->pci_cfg.sub_device_id;
5939 	(knp++)->value.ui32 = qlge->pci_cfg.pcie_device_control;
5940 	(knp++)->value.ui32 = qlge->pci_cfg.link_status;
5941 
5942 	return (0);
5943 }
5944 
5945 static const ql_ksindex_t ql_kstats_mii[] = {
5946 	/* MAC/MII related hardware information */
5947 	{ 0, "mtu"},
5948 
5949 	{ -1, NULL},
5950 };
5951 
5952 
5953 /*
5954  * kstat update function for MII related information.
5955  */
5956 static int
5957 ql_kstats_mii_update(kstat_t *ksp, int flag)
5958 {
5959 	qlge_t *qlge;
5960 	kstat_named_t *knp;
5961 
5962 	if (flag != KSTAT_READ)
5963 		return (EACCES);
5964 
5965 	qlge = ksp->ks_private;
5966 	knp = ksp->ks_data;
5967 
5968 	(knp++)->value.ui32 = qlge->mtu;
5969 
5970 	return (0);
5971 }
5972 
5973 static const ql_ksindex_t ql_kstats_reg[] = {
5974 	/* Register information */
5975 	{ 0, "System (0x08)"			},
5976 	{ 1, "Reset/Fail Over(0x0Ch"		},
5977 	{ 2, "Function Specific Control(0x10)"	},
5978 	{ 3, "Status (0x30)"			},
5979 	{ 4, "Intr Enable (0x34)"		},
5980 	{ 5, "Intr Status1 (0x3C)"		},
5981 	{ 6, "Error Status (0x54)"		},
5982 	{ 7, "XGMAC Flow Control(0x11C)"	},
5983 	{ 8, "XGMAC Tx Pause Frames(0x230)"	},
5984 	{ 9, "XGMAC Rx Pause Frames(0x388)"	},
5985 	{ 10, "XGMAC Rx FIFO Drop Count(0x5B8)"	},
5986 	{ 11, "interrupts actually allocated"	},
5987 	{ 12, "interrupts on rx ring 0"		},
5988 	{ 13, "interrupts on rx ring 1"		},
5989 	{ 14, "interrupts on rx ring 2"		},
5990 	{ 15, "interrupts on rx ring 3"		},
5991 	{ 16, "interrupts on rx ring 4"		},
5992 	{ 17, "interrupts on rx ring 5"		},
5993 	{ 18, "interrupts on rx ring 6"		},
5994 	{ 19, "interrupts on rx ring 7"		},
5995 	{ 20, "polls on rx ring 0"		},
5996 	{ 21, "polls on rx ring 1"		},
5997 	{ 22, "polls on rx ring 2"		},
5998 	{ 23, "polls on rx ring 3"		},
5999 	{ 24, "polls on rx ring 4"		},
6000 	{ 25, "polls on rx ring 5"		},
6001 	{ 26, "polls on rx ring 6"		},
6002 	{ 27, "polls on rx ring 7"		},
6003 	{ 28, "tx no resource on ring 0"	},
6004 	{ 29, "tx dma bind fail on ring 0"	},
6005 	{ 30, "tx dma no handle on ring 0"	},
6006 	{ 31, "tx dma no cookie on ring 0"	},
6007 	{ 32, "MPI firmware major version"	},
6008 	{ 33, "MPI firmware minor version"	},
6009 	{ 34, "MPI firmware sub version"	},
6010 	{ 35, "rx no resource"			},
6011 
6012 	{ -1, NULL},
6013 };
6014 
6015 
6016 /*
6017  * kstat update function for device register set
6018  */
6019 static int
6020 ql_kstats_get_reg_and_dev_stats(kstat_t *ksp, int flag)
6021 {
6022 	qlge_t *qlge;
6023 	kstat_named_t *knp;
6024 	uint32_t val32;
6025 	int i = 0;
6026 	struct tx_ring *tx_ring;
6027 	struct rx_ring *rx_ring;
6028 
6029 	if (flag != KSTAT_READ)
6030 		return (EACCES);
6031 
6032 	qlge = ksp->ks_private;
6033 	knp = ksp->ks_data;
6034 
6035 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_SYSTEM);
6036 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_RESET_FAILOVER);
6037 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL);
6038 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_STATUS);
6039 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_ENABLE);
6040 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_INTERRUPT_STATUS_1);
6041 	(knp++)->value.ui32 = ql_read_reg(qlge, REG_ERROR_STATUS);
6042 
6043 	if (ql_sem_spinlock(qlge, qlge->xgmac_sem_mask)) {
6044 		return (0);
6045 	}
6046 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_FLOW_CONTROL, &val32);
6047 	(knp++)->value.ui32 = val32;
6048 
6049 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_TX_PAUSE_PKTS, &val32);
6050 	(knp++)->value.ui32 = val32;
6051 
6052 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_PAUSE_PKTS, &val32);
6053 	(knp++)->value.ui32 = val32;
6054 
6055 	(void) ql_read_xgmac_reg(qlge, REG_XGMAC_MAC_RX_FIFO_DROPS, &val32);
6056 	(knp++)->value.ui32 = val32;
6057 
6058 	ql_sem_unlock(qlge, qlge->xgmac_sem_mask);
6059 
6060 	(knp++)->value.ui32 = qlge->intr_cnt;
6061 
6062 	for (i = 0; i < 8; i++) {
6063 		(knp++)->value.ui32 = qlge->rx_interrupts[i];
6064 	}
6065 
6066 	for (i = 0; i < 8; i++) {
6067 		(knp++)->value.ui32 = qlge->rx_polls[i];
6068 	}
6069 
6070 	tx_ring = &qlge->tx_ring[0];
6071 	(knp++)->value.ui32 = tx_ring->defer;
6072 	(knp++)->value.ui32 = tx_ring->tx_fail_dma_bind;
6073 	(knp++)->value.ui32 = tx_ring->tx_no_dma_handle;
6074 	(knp++)->value.ui32 = tx_ring->tx_no_dma_cookie;
6075 
6076 	(knp++)->value.ui32 = qlge->fw_version_info.major_version;
6077 	(knp++)->value.ui32 = qlge->fw_version_info.minor_version;
6078 	(knp++)->value.ui32 = qlge->fw_version_info.sub_minor_version;
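
	/* Sum packets dropped for lack of rx buffers across all rings. */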
6079 
	val32 = 0;
	for (i = 0; i < qlge->rx_ring_count; i++) {
		rx_ring = &qlge->rx_ring[i];
		val32 += rx_ring->rx_packets_dropped_no_buffer;
	}
6084 	(knp++)->value.ui32 = val32;
6085 
6086 	return (0);
6087 }
6088 
6089 
6090 static kstat_t *
6091 ql_setup_named_kstat(qlge_t *qlge, int instance, char *name,
6092     const ql_ksindex_t *ksip, size_t size, int (*update)(kstat_t *, int))
6093 {
6094 	kstat_t *ksp;
6095 	kstat_named_t *knp;
6096 	char *np;
6097 	int type;
6098 
6099 	size /= sizeof (ql_ksindex_t);
6100 	ksp = kstat_create(ADAPTER_NAME, instance, name, "net",
6101 	    KSTAT_TYPE_NAMED, ((uint32_t)size) - 1, KSTAT_FLAG_PERSISTENT);
6102 	if (ksp == NULL)
6103 		return (NULL);
6104 
6105 	ksp->ks_private = qlge;
6106 	ksp->ks_update = update;
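	/*
	 * Walk the index table until the {-1, NULL} terminator.  Names
	 * default to 32-bit counters; a leading '&' marks a character
	 * statistic instead.
	 */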
6107 	for (knp = ksp->ks_data; (np = ksip->name) != NULL; ++knp, ++ksip) {
6108 		switch (*np) {
6109 		default:
6110 			type = KSTAT_DATA_UINT32;
6111 			break;
6112 		case '&':
6113 			np += 1;
6114 			type = KSTAT_DATA_CHAR;
6115 			break;
6116 		}
6117 		kstat_named_init(knp, np, (uint8_t)type);
6118 	}
6119 	kstat_install(ksp);
6120 
6121 	return (ksp);
6122 }
6123 
6124 /*
6125  * Setup various kstat
6126  */
6127 int
6128 ql_init_kstats(qlge_t *qlge)
6129 {
6130 	/* Hardware KStats */
6131 	qlge->ql_kstats[QL_KSTAT_CHIP] = ql_setup_named_kstat(qlge,
6132 	    qlge->instance, "chip", ql_kstats_hw,
6133 	    sizeof (ql_kstats_hw), ql_kstats_get_pci_regs);
6134 	if (qlge->ql_kstats[QL_KSTAT_CHIP] == NULL) {
6135 		return (DDI_FAILURE);
6136 	}
6137 
6138 	/* MII KStats */
6139 	qlge->ql_kstats[QL_KSTAT_LINK] = ql_setup_named_kstat(qlge,
6140 	    qlge->instance, "mii", ql_kstats_mii,
6141 	    sizeof (ql_kstats_mii), ql_kstats_mii_update);
6142 	if (qlge->ql_kstats[QL_KSTAT_LINK] == NULL) {
6143 		return (DDI_FAILURE);
6144 	}
6145 
6146 	/* REG KStats */
6147 	qlge->ql_kstats[QL_KSTAT_REG] = ql_setup_named_kstat(qlge,
6148 	    qlge->instance, "reg", ql_kstats_reg,
6149 	    sizeof (ql_kstats_reg), ql_kstats_get_reg_and_dev_stats);
6150 	if (qlge->ql_kstats[QL_KSTAT_REG] == NULL) {
6151 		return (DDI_FAILURE);
6152 	}
6153 	return (DDI_SUCCESS);
6154 }
6155 
6156 /*
6157  * delete all kstat
6158  */
6159 void
6160 ql_fini_kstats(qlge_t *qlge)
6161 {
6162 	int i;
6163 
6164 	for (i = 0; i < QL_KSTAT_COUNT; i++) {
6165 		if (qlge->ql_kstats[i] != NULL)
6166 			kstat_delete(qlge->ql_kstats[i]);
6167 	}
6168 }
6169 
6170 /* ************************************************************************* */
6171 /*
6172  *                                 kstat end
6173  */
6174 /* ************************************************************************* */
6175 
6176 /*
6177  * Setup the parameters for receive and transmit rings including buffer sizes
6178  * and completion queue sizes
6179  */
6180 static int
6181 ql_setup_rings(qlge_t *qlge)
6182 {
6183 	uint8_t i;
6184 	struct rx_ring *rx_ring;
6185 	struct tx_ring *tx_ring;
6186 	uint16_t lbq_buf_size;
6187 
6188 	lbq_buf_size = (uint16_t)
6189 	    ((qlge->mtu == ETHERMTU)? LRG_BUF_NORMAL_SIZE : LRG_BUF_JUMBO_SIZE);
6190 
6191 	/*
6192 	 * rx_ring[0] is always the default queue.
6193 	 */
6194 	/*
6195 	 * qlge->rx_ring_count:
6196 	 * Total number of rx_rings. This includes a number
6197 	 * of outbound completion handler rx_rings, and a
6198 	 * number of inbound completion handler rx_rings.
6199 	 * rss is only enabled if we have more than 1 rx completion
6200 	 * queue. If we have a single rx completion queue
6201 	 * then all rx completions go to this queue and
6202 	 * the last completion queue
6203 	 */
6204 
6205 	qlge->tx_ring_first_cq_id = qlge->rss_ring_count;
6206 
6207 	for (i = 0; i < qlge->tx_ring_count; i++) {
6208 		tx_ring = &qlge->tx_ring[i];
6209 		bzero((void *)tx_ring, sizeof (*tx_ring));
6210 		tx_ring->qlge = qlge;
6211 		tx_ring->wq_id = i;
6212 		tx_ring->wq_len = qlge->tx_ring_size;
6213 		tx_ring->wq_size = (uint32_t)(
6214 		    tx_ring->wq_len * sizeof (struct ob_mac_iocb_req));
6215 
6216 		/*
6217 		 * The completion queue ID for the tx rings start
6218 		 * immediately after the last rss completion queue.
6219 		 */
6220 		tx_ring->cq_id = (uint16_t)(i + qlge->tx_ring_first_cq_id);
6221 	}
6222 
6223 	for (i = 0; i < qlge->rx_ring_count; i++) {
6224 		rx_ring = &qlge->rx_ring[i];
6225 		bzero((void *)rx_ring, sizeof (*rx_ring));
6226 		rx_ring->qlge = qlge;
6227 		rx_ring->cq_id = i;
6228 		if (i != 0)
6229 			rx_ring->cpu = (i) % qlge->rx_ring_count;
6230 		else
6231 			rx_ring->cpu = 0;
6232 
6233 		if (i < qlge->rss_ring_count) {
6234 			/*
6235 			 * Inbound completions (RSS) queues
6236 			 * Default queue is queue 0 which handles
6237 			 * unicast plus bcast/mcast and async events.
6238 			 * Other inbound queues handle unicast frames only.
6239 			 */
6240 			rx_ring->cq_len = qlge->rx_ring_size;
6241 			rx_ring->cq_size = (uint32_t)
6242 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6243 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
6244 			rx_ring->lbq_size = (uint32_t)
6245 			    (rx_ring->lbq_len * sizeof (uint64_t));
6246 			rx_ring->lbq_buf_size = lbq_buf_size;
6247 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
6248 			rx_ring->sbq_size = (uint32_t)
6249 			    (rx_ring->sbq_len * sizeof (uint64_t));
6250 			rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
6251 			rx_ring->type = RX_Q;
6252 
6253 			QL_PRINT(DBG_GLD,
6254 			    ("%s(%d)Allocating rss completion queue %d "
6255 			    "on cpu %d\n", __func__, qlge->instance,
6256 			    rx_ring->cq_id, rx_ring->cpu));
6257 		} else {
6258 			/*
6259 			 * Outbound queue handles outbound completions only
6260 			 */
6261 			/* outbound cq is same size as tx_ring it services. */
6262 			QL_PRINT(DBG_INIT, ("rx_ring 0x%p i %d\n", rx_ring, i));
6263 			rx_ring->cq_len = qlge->tx_ring_size;
6264 			rx_ring->cq_size = (uint32_t)
6265 			    (rx_ring->cq_len * sizeof (struct net_rsp_iocb));
6266 			rx_ring->lbq_len = 0;
6267 			rx_ring->lbq_size = 0;
6268 			rx_ring->lbq_buf_size = 0;
6269 			rx_ring->sbq_len = 0;
6270 			rx_ring->sbq_size = 0;
6271 			rx_ring->sbq_buf_size = 0;
6272 			rx_ring->type = TX_Q;
6273 
6274 			QL_PRINT(DBG_GLD,
6275 			    ("%s(%d)Allocating TX completion queue %d on"
6276 			    " cpu %d\n", __func__, qlge->instance,
6277 			    rx_ring->cq_id, rx_ring->cpu));
6278 		}
6279 	}
6280 
6281 	return (DDI_SUCCESS);
6282 }
6283 
6284 static int
6285 ql_start_rx_ring(qlge_t *qlge, struct rx_ring *rx_ring)
6286 {
6287 	struct cqicb_t *cqicb = (struct cqicb_t *)rx_ring->cqicb_dma.vaddr;
6288 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6289 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6290 	/* first shadow area is used by wqicb's host copy of consumer index */
6291 	    + sizeof (uint64_t);
6292 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6293 	    (rx_ring->cq_id * sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE)
6294 	    + sizeof (uint64_t);
6295 	/* lrg/sml bufq pointers */
6296 	uint8_t *buf_q_base_reg =
6297 	    (uint8_t *)qlge->buf_q_ptr_base_addr_dma_attr.vaddr +
6298 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6299 	uint64_t buf_q_base_reg_dma =
6300 	    qlge->buf_q_ptr_base_addr_dma_attr.dma_addr +
6301 	    (rx_ring->cq_id * sizeof (uint64_t) * BUF_Q_PTR_SPACE);
6302 	caddr_t doorbell_area =
6303 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * (128 + rx_ring->cq_id));
6304 	int err = 0;
6305 	uint16_t bq_len;
6306 	uint64_t tmp;
6307 	uint64_t *base_indirect_ptr;
6308 	int page_entries;
6309 
6310 	/* Set up the shadow registers for this ring. */
6311 	rx_ring->prod_idx_sh_reg = shadow_reg;
6312 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
6313 	rx_ring->prod_idx_sh_reg_offset = (off_t)(((rx_ring->cq_id *
6314 	    sizeof (uint64_t) * RX_TX_RING_SHADOW_SPACE) + sizeof (uint64_t)));
6315 
6316 	rx_ring->lbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6317 	rx_ring->lbq_base_indirect_dma = buf_q_base_reg_dma;
6318 
6319 	QL_PRINT(DBG_INIT, ("%s rx ring(%d): prod_idx virtual addr = 0x%lx,"
6320 	    " phys_addr 0x%lx\n", __func__, rx_ring->cq_id,
6321 	    rx_ring->prod_idx_sh_reg, rx_ring->prod_idx_sh_reg_dma));
6322 
6323 	buf_q_base_reg += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6324 	buf_q_base_reg_dma += ((BUF_Q_PTR_SPACE / 2) * sizeof (uint64_t));
6325 	rx_ring->sbq_base_indirect = (uint64_t *)(void *)buf_q_base_reg;
6326 	rx_ring->sbq_base_indirect_dma = buf_q_base_reg_dma;
6327 
6328 	/* PCI doorbell mem area + 0x00 for consumer index register */
6329 	rx_ring->cnsmr_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6330 	rx_ring->cnsmr_idx = 0;
6331 	*rx_ring->prod_idx_sh_reg = 0;
6332 	rx_ring->curr_entry = rx_ring->cq_dma.vaddr;
6333 
6334 	/* PCI doorbell mem area + 0x04 for valid register */
6335 	rx_ring->valid_db_reg = (uint32_t *)(void *)
6336 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6337 
6338 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
6339 	rx_ring->lbq_prod_idx_db_reg = (uint32_t *)(void *)
6340 	    ((uint8_t *)(void *)doorbell_area + 0x18);
6341 
6342 	/* PCI doorbell mem area + 0x1c */
6343 	rx_ring->sbq_prod_idx_db_reg = (uint32_t *)(void *)
6344 	    ((uint8_t *)(void *)doorbell_area + 0x1c);
6345 
6346 	bzero((void *)cqicb, sizeof (*cqicb));
6347 
6348 	cqicb->msix_vect = (uint8_t)rx_ring->irq;
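
	/*
	 * The 16-bit length fields encode a queue length of 65536 as
	 * zero; all other lengths are stored as-is.
	 */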
6349 
6350 	bq_len = (uint16_t)((rx_ring->cq_len == 65536) ?
6351 	    (uint16_t)0 : (uint16_t)rx_ring->cq_len);
6352 	cqicb->len = (uint16_t)cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
6353 
6354 	cqicb->cq_base_addr_lo =
6355 	    cpu_to_le32(LS_64BITS(rx_ring->cq_dma.dma_addr));
6356 	cqicb->cq_base_addr_hi =
6357 	    cpu_to_le32(MS_64BITS(rx_ring->cq_dma.dma_addr));
6358 
6359 	cqicb->prod_idx_addr_lo =
6360 	    cpu_to_le32(LS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6361 	cqicb->prod_idx_addr_hi =
6362 	    cpu_to_le32(MS_64BITS(rx_ring->prod_idx_sh_reg_dma));
6363 
6364 	/*
6365 	 * Set up the control block load flags.
6366 	 */
6367 	cqicb->flags = FLAGS_LC | /* Load queue base address */
6368 	    FLAGS_LV | /* Load MSI-X vector */
6369 	    FLAGS_LI;  /* Load irq delay values */
6370 	if (rx_ring->lbq_len) {
6371 		/* Load lbq values */
6372 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LL);
6373 		tmp = (uint64_t)rx_ring->lbq_dma.dma_addr;
6374 		base_indirect_ptr = (uint64_t *)rx_ring->lbq_base_indirect;
6375 		page_entries = 0;
6376 		do {
6377 			*base_indirect_ptr = cpu_to_le64(tmp);
6378 			tmp += VM_PAGE_SIZE;
6379 			base_indirect_ptr++;
6380 			page_entries++;
6381 		} while (page_entries < (int)(
6382 		    ((rx_ring->lbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6383 
6384 		cqicb->lbq_addr_lo =
6385 		    cpu_to_le32(LS_64BITS(rx_ring->lbq_base_indirect_dma));
6386 		cqicb->lbq_addr_hi =
6387 		    cpu_to_le32(MS_64BITS(rx_ring->lbq_base_indirect_dma));
6388 		bq_len = (uint16_t)((rx_ring->lbq_buf_size == 65536) ?
6389 		    (uint16_t)0 : (uint16_t)rx_ring->lbq_buf_size);
6390 		cqicb->lbq_buf_size = (uint16_t)cpu_to_le16(bq_len);
6391 		bq_len = (uint16_t)((rx_ring->lbq_len == 65536) ? (uint16_t)0 :
6392 		    (uint16_t)rx_ring->lbq_len);
6393 		cqicb->lbq_len = (uint16_t)cpu_to_le16(bq_len);
6394 		rx_ring->lbq_prod_idx = 0;
6395 		rx_ring->lbq_curr_idx = 0;
6396 	}
6397 	if (rx_ring->sbq_len) {
6398 		/* Load sbq values */
6399 		cqicb->flags = (uint8_t)(cqicb->flags | FLAGS_LS);
6400 		tmp = (uint64_t)rx_ring->sbq_dma.dma_addr;
6401 		base_indirect_ptr = (uint64_t *)rx_ring->sbq_base_indirect;
6402 		page_entries = 0;
6403 
6404 		do {
6405 			*base_indirect_ptr = cpu_to_le64(tmp);
6406 			tmp += VM_PAGE_SIZE;
6407 			base_indirect_ptr++;
6408 			page_entries++;
6409 		} while (page_entries < (uint32_t)
6410 		    (((rx_ring->sbq_len * sizeof (uint64_t)) / VM_PAGE_SIZE)));
6411 
6412 		cqicb->sbq_addr_lo =
6413 		    cpu_to_le32(LS_64BITS(rx_ring->sbq_base_indirect_dma));
6414 		cqicb->sbq_addr_hi =
6415 		    cpu_to_le32(MS_64BITS(rx_ring->sbq_base_indirect_dma));
6416 		cqicb->sbq_buf_size = (uint16_t)
6417 		    cpu_to_le16((uint16_t)(rx_ring->sbq_buf_size/2));
6418 		bq_len = (uint16_t)((rx_ring->sbq_len == 65536) ?
6419 		    (uint16_t)0 : (uint16_t)rx_ring->sbq_len);
6420 		cqicb->sbq_len = (uint16_t)cpu_to_le16(bq_len);
6421 		rx_ring->sbq_prod_idx = 0;
6422 		rx_ring->sbq_curr_idx = 0;
6423 	}
6424 	switch (rx_ring->type) {
6425 	case TX_Q:
6426 		cqicb->irq_delay = (uint16_t)
6427 		    cpu_to_le16(qlge->tx_coalesce_usecs);
6428 		cqicb->pkt_delay = (uint16_t)
6429 		    cpu_to_le16(qlge->tx_max_coalesced_frames);
6430 		break;
6431 
6432 	case DEFAULT_Q:
6433 		cqicb->irq_delay = (uint16_t)
6434 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6435 		cqicb->pkt_delay = (uint16_t)
6436 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6437 		break;
6438 
6439 	case RX_Q:
6440 		/*
6441 		 * Inbound completion handling rx_rings run in
6442 		 * separate NAPI contexts.
6443 		 */
6444 		cqicb->irq_delay = (uint16_t)
6445 		    cpu_to_le16(qlge->rx_coalesce_usecs);
6446 		cqicb->pkt_delay = (uint16_t)
6447 		    cpu_to_le16(qlge->rx_max_coalesced_frames);
6448 		break;
6449 	default:
6450 		cmn_err(CE_WARN, "Invalid rx_ring->type = %d.",
6451 		    rx_ring->type);
6452 	}
6453 	QL_PRINT(DBG_INIT, ("Initializing rx completion queue %d.\n",
6454 	    rx_ring->cq_id));
6455 	/* QL_DUMP_CQICB(qlge, cqicb); */
6456 	err = ql_write_cfg(qlge, CFG_LCQ, rx_ring->cqicb_dma.dma_addr,
6457 	    rx_ring->cq_id);
6458 	if (err) {
6459 		cmn_err(CE_WARN, "Failed to load CQICB.");
6460 		return (err);
6461 	}
6462 
6463 	rx_ring->rx_packets_dropped_no_buffer = 0;
6464 	rx_ring->rx_pkt_dropped_mac_unenabled = 0;
6465 	rx_ring->rx_failed_sbq_allocs = 0;
6466 	rx_ring->rx_failed_lbq_allocs = 0;
6467 	rx_ring->rx_packets = 0;
6468 	rx_ring->rx_bytes = 0;
6469 	rx_ring->frame_too_long = 0;
6470 	rx_ring->frame_too_short = 0;
6471 	rx_ring->fcs_err = 0;
6472 
6473 	return (err);
6474 }
6475 
6476 /*
6477  * start RSS
6478  */
6479 static int
6480 ql_start_rss(qlge_t *qlge)
6481 {
6482 	struct ricb *ricb = (struct ricb *)qlge->ricb_dma.vaddr;
6483 	int status = 0;
6484 	int i;
6485 	uint8_t *hash_id = (uint8_t *)ricb->hash_cq_id;
6486 
6487 	bzero((void *)ricb, sizeof (*ricb));
6488 
6489 	ricb->base_cq = RSS_L4K;
6490 	ricb->flags =
6491 	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
6492 	    RSS_RT6);
6493 	ricb->mask = (uint16_t)cpu_to_le16(RSS_HASH_CQ_ID_MAX - 1);
6494 
6495 	/*
6496 	 * Fill out the Indirection Table.
6497 	 */
6498 	for (i = 0; i < RSS_HASH_CQ_ID_MAX; i++)
6499 		hash_id[i] = (uint8_t)(i & (qlge->rss_ring_count - 1));
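	/*
	 * The mask above spreads the hash buckets across the RSS rings;
	 * this assumes rss_ring_count is a power of two.
	 */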
6500 
6501 	(void) memcpy(&ricb->ipv6_hash_key[0], key_data, 40);
6502 	(void) memcpy(&ricb->ipv4_hash_key[0], key_data, 16);
6503 
6504 	QL_PRINT(DBG_INIT, ("Initializing RSS.\n"));
6505 
6506 	status = ql_write_cfg(qlge, CFG_LR, qlge->ricb_dma.dma_addr, 0);
6507 	if (status) {
6508 		cmn_err(CE_WARN, "Failed to load RICB.");
6509 		return (status);
6510 	}
6511 
6512 	return (status);
6513 }
6514 
6515 /*
6516  * load a tx ring control block to hw and start this ring
6517  */
6518 static int
6519 ql_start_tx_ring(qlge_t *qlge, struct tx_ring *tx_ring)
6520 {
6521 	struct wqicb_t *wqicb = (struct wqicb_t *)tx_ring->wqicb_dma.vaddr;
6522 	caddr_t doorbell_area =
6523 	    qlge->doorbell_reg_iobase + (VM_PAGE_SIZE * tx_ring->wq_id);
6524 	void *shadow_reg = (uint8_t *)qlge->host_copy_shadow_dma_attr.vaddr +
6525 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6526 	uint64_t shadow_reg_dma = qlge->host_copy_shadow_dma_attr.dma_addr +
6527 	    (tx_ring->wq_id * sizeof (uint64_t)) * RX_TX_RING_SHADOW_SPACE;
6528 	int err = 0;
6529 
6530 	/*
6531 	 * Assign doorbell registers for this tx_ring.
6532 	 */
6533 
6534 	/* TX PCI doorbell mem area for tx producer index */
6535 	tx_ring->prod_idx_db_reg = (uint32_t *)(void *)doorbell_area;
6536 	tx_ring->prod_idx = 0;
6537 	/* TX PCI doorbell mem area + 0x04 */
6538 	tx_ring->valid_db_reg = (uint32_t *)(void *)
6539 	    ((uint8_t *)(void *)doorbell_area + 0x04);
6540 
6541 	/*
6542 	 * Assign shadow registers for this tx_ring.
6543 	 */
6544 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
6545 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
6546 	*tx_ring->cnsmr_idx_sh_reg = 0;
6547 
6548 	QL_PRINT(DBG_INIT, ("%s tx ring(%d): cnsmr_idx virtual addr = 0x%lx,"
6549 	    " phys_addr 0x%lx\n",
6550 	    __func__, tx_ring->wq_id, tx_ring->cnsmr_idx_sh_reg,
6551 	    tx_ring->cnsmr_idx_sh_reg_dma));
6552 
6553 	wqicb->len =
6554 	    (uint16_t)cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
6555 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
6556 	    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
6557 	wqicb->cq_id_rss = (uint16_t)cpu_to_le16(tx_ring->cq_id);
6558 	wqicb->rid = 0;
6559 	wqicb->wq_addr_lo = cpu_to_le32(LS_64BITS(tx_ring->wq_dma.dma_addr));
6560 	wqicb->wq_addr_hi = cpu_to_le32(MS_64BITS(tx_ring->wq_dma.dma_addr));
6561 	wqicb->cnsmr_idx_addr_lo =
6562 	    cpu_to_le32(LS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6563 	wqicb->cnsmr_idx_addr_hi =
6564 	    cpu_to_le32(MS_64BITS(tx_ring->cnsmr_idx_sh_reg_dma));
6565 
6566 	ql_init_tx_ring(tx_ring);
6567 	/* QL_DUMP_WQICB(qlge, wqicb); */
6568 	err = ql_write_cfg(qlge, CFG_LRQ, tx_ring->wqicb_dma.dma_addr,
6569 	    tx_ring->wq_id);
6570 
6571 	if (err) {
6572 		cmn_err(CE_WARN, "Failed to load WQICB.");
6573 		return (err);
6574 	}
6575 	return (err);
6576 }
6577 
6578 /*
6579  * Set up a MAC, multicast or VLAN address for the
6580  * inbound frame matching.
6581  */
6582 int
6583 ql_set_mac_addr_reg(qlge_t *qlge, uint8_t *addr, uint32_t type,
6584     uint16_t index)
6585 {
6586 	uint32_t offset = 0;
6587 	int status = DDI_SUCCESS;
6588 
6589 	switch (type) {
6590 	case MAC_ADDR_TYPE_MULTI_MAC:
6591 	case MAC_ADDR_TYPE_CAM_MAC: {
6592 		uint32_t cam_output;
6593 		uint32_t upper = (addr[0] << 8) | addr[1];
6594 		uint32_t lower =
6595 		    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
6596 		    (addr[5]);
6597 
6598 		QL_PRINT(DBG_INIT, ("Adding %s ", (type ==
6599 		    MAC_ADDR_TYPE_MULTI_MAC) ?
6600 		    "MULTICAST" : "UNICAST"));
6601 		QL_PRINT(DBG_INIT,
6602 		    ("addr %02x %02x %02x %02x %02x %02x at index %d in "
6603 		    "the CAM.\n",
6604 		    addr[0], addr[1], addr[2], addr[3], addr[4],
6605 		    addr[5], index));
6606 
6607 		status = ql_wait_reg_rdy(qlge,
6608 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6609 		if (status)
6610 			goto exit;
6611 		/* offset 0 - lower 32 bits of the MAC address */
6612 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6613 		    (offset++) |
6614 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6615 		    type);	/* type */
6616 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, lower);
6617 		status = ql_wait_reg_rdy(qlge,
6618 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6619 		if (status)
6620 			goto exit;
6621 		/* offset 1 - upper 16 bits of the MAC address */
6622 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6623 		    (offset++) |
6624 		    (index << MAC_ADDR_IDX_SHIFT) | /* index */
6625 		    type);	/* type */
6626 		ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA, upper);
6627 		status = ql_wait_reg_rdy(qlge,
6628 		    REG_MAC_PROTOCOL_ADDRESS_INDEX, MAC_ADDR_MW, 0);
6629 		if (status)
6630 			goto exit;
6631 		/* offset 2 - CQ ID associated with this MAC address */
6632 		ql_write_reg(qlge, REG_MAC_PROTOCOL_ADDRESS_INDEX,
6633 		    (offset) | (index << MAC_ADDR_IDX_SHIFT) |	/* index */
6634 		    type);	/* type */
6635 		/*
6636 		 * This field should also include the queue id
6637 		 * and possibly the function id.  Right now we hardcode
6638 		 * the route field to NIC core.
6639 		 */
6640 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
6641 			cam_output = (CAM_OUT_ROUTE_NIC |
6642 			    (qlge->func_number << CAM_OUT_FUNC_SHIFT) |
6643 			    (0 <<
6644 			    CAM_OUT_CQ_ID_SHIFT));
6645 
6646 			/* route to NIC core */
6647 			ql_write_reg(qlge, REG_MAC_PROTOCOL_DATA,
6648 			    cam_output);
6649 			}
6650 		break;
6651 		}
6652 	default:
6653 		cmn_err(CE_WARN,
6654 		    "Address type %d not yet supported.", type);
6655 		status = DDI_FAILURE;
6656 	}
6657 exit:
6658 	return (status);
6659 }
6660 
6661 /*
6662  * The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send
 * broadcast, multicast, and error frames to the default queue for slow
 * handling, and CAM hit/RSS frames to the fast handling queues.
6666  */
6667 static int
6668 ql_set_routing_reg(qlge_t *qlge, uint32_t index, uint32_t mask, int enable)
6669 {
6670 	int status;
6671 	uint32_t value = 0;
6672 
6673 	QL_PRINT(DBG_INIT,
6674 	    ("%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
6675 	    (enable ? "Adding" : "Removing"),
6676 	    ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
6677 	    ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
6678 	    ((index ==
6679 	    RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
6680 	    ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
6681 	    ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
6682 	    ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
6683 	    ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
6684 	    ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
6685 	    ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
6686 	    ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
6687 	    ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
6688 	    ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
6689 	    ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
6690 	    ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
6691 	    ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
6692 	    ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
6693 	    (enable ? "to" : "from")));
6694 
6695 	switch (mask) {
6696 	case RT_IDX_CAM_HIT:
6697 		value = RT_IDX_DST_CAM_Q | /* dest */
6698 		    RT_IDX_TYPE_NICQ | /* type */
6699 		    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
6700 		break;
6701 
6702 	case RT_IDX_VALID: /* Promiscuous Mode frames. */
6703 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6704 		    RT_IDX_TYPE_NICQ |	/* type */
6705 		    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
6706 		break;
6707 
6708 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
6709 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6710 		    RT_IDX_TYPE_NICQ |	/* type */
6711 		    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
6712 		break;
6713 
6714 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
6715 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6716 		    RT_IDX_TYPE_NICQ |	/* type */
6717 		    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
6718 		break;
6719 
6720 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
6721 		value = RT_IDX_DST_CAM_Q |	/* dest */
6722 		    RT_IDX_TYPE_NICQ |	/* type */
6723 		    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
6724 		break;
6725 
6726 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
6727 		value = RT_IDX_DST_CAM_Q |	/* dest */
6728 		    RT_IDX_TYPE_NICQ |	/* type */
6729 		    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6730 		break;
6731 
6732 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
6733 		value = RT_IDX_DST_RSS |	/* dest */
6734 		    RT_IDX_TYPE_NICQ |	/* type */
6735 		    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
6736 		break;
6737 
6738 	case 0:	/* Clear the E-bit on an entry. */
6739 		value = RT_IDX_DST_DFLT_Q |	/* dest */
6740 		    RT_IDX_TYPE_NICQ |	/* type */
6741 		    (index << RT_IDX_IDX_SHIFT); /* index */
6742 		break;
6743 
6744 	default:
6745 		cmn_err(CE_WARN, "Mask type %d not yet supported.",
6746 		    mask);
6747 		status = -EPERM;
6748 		goto exit;
6749 	}
6750 
6751 	if (value != 0) {
6752 		status = ql_wait_reg_rdy(qlge, REG_ROUTING_INDEX, RT_IDX_MW, 0);
6753 		if (status)
6754 			goto exit;
6755 		value |= (enable ? RT_IDX_E : 0);
6756 		ql_write_reg(qlge, REG_ROUTING_INDEX, value);
6757 		ql_write_reg(qlge, REG_ROUTING_DATA, enable ? mask : 0);
6758 	}
6759 
6760 exit:
6761 	return (status);
6762 }
6763 
6764 /*
6765  * Clear all the entries in the routing table.
6766  * Caller must get semaphore in advance.
6767  */
6768 
6769 static int
6770 ql_stop_routing(qlge_t *qlge)
6771 {
6772 	int status = 0;
6773 	int i;
6774 	/* Clear all the entries in the routing table. */
6775 	for (i = 0; i < 16; i++) {
6776 		status = ql_set_routing_reg(qlge, i, 0, 0);
6777 		if (status) {
6778 			cmn_err(CE_WARN, "Stop routing failed. ");
6779 		}
6780 	}
6781 	return (status);
6782 }
6783 
6784 /* Initialize the frame-to-queue routing. */
6785 static int
6786 ql_route_initialize(qlge_t *qlge)
6787 {
6788 	int status = 0;
6789 
6790 	status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
6791 	if (status != DDI_SUCCESS)
6792 		return (status);
6793 
6794 	/* Clear all the entries in the routing table. */
6795 	status = ql_stop_routing(qlge);
6796 	if (status) {
6797 		goto exit;
6798 	}
6799 	status = ql_set_routing_reg(qlge, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
6800 	if (status) {
6801 		cmn_err(CE_WARN,
6802 		    "Failed to init routing register for broadcast packets.");
6803 		goto exit;
6804 	}
6805 	/*
6806 	 * If we have more than one inbound queue, then turn on RSS in the
6807 	 * routing block.
6808 	 */
6809 	if (qlge->rss_ring_count > 1) {
6810 		status = ql_set_routing_reg(qlge, RT_IDX_RSS_MATCH_SLOT,
6811 		    RT_IDX_RSS_MATCH, 1);
6812 		if (status) {
6813 			cmn_err(CE_WARN,
6814 			    "Failed to init routing register for MATCH RSS "
6815 			    "packets.");
6816 			goto exit;
6817 		}
6818 	}
6819 
6820 	status = ql_set_routing_reg(qlge, RT_IDX_CAM_HIT_SLOT,
6821 	    RT_IDX_CAM_HIT, 1);
6822 	if (status) {
6823 		cmn_err(CE_WARN,
6824 		    "Failed to init routing register for CAM packets.");
6825 		goto exit;
6826 	}
6827 
6828 	status = ql_set_routing_reg(qlge, RT_IDX_MCAST_MATCH_SLOT,
6829 	    RT_IDX_MCAST_MATCH, 1);
6830 	if (status) {
6831 		cmn_err(CE_WARN,
6832 		    "Failed to init routing register for Multicast "
6833 		    "packets.");
6834 	}
6835 
6836 exit:
6837 	ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
6838 	return (status);
6839 }
6840 
6841 /*
6842  * Initialize hardware
6843  */
6844 static int
6845 ql_device_initialize(qlge_t *qlge)
6846 {
6847 	uint32_t value, mask;
6848 	int i;
6849 	int status = 0;
6850 	uint16_t pause = PAUSE_MODE_DISABLED;
6851 	boolean_t update_port_config = B_FALSE;
6852 	uint32_t pause_bit_mask;
6853 	boolean_t dcbx_enable = B_FALSE;
6854 	uint32_t dcbx_bit_mask = 0x10;
6855 	/*
6856 	 * Set up the System register to halt on errors.
6857 	 */
6858 	value = SYS_EFE | SYS_FAE;
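	/*
	 * For this and the other mask | value writes below, the upper
	 * 16 bits of the register select which of the lower 16 bits
	 * are modified.
	 */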
6859 	mask = value << 16;
6860 	ql_write_reg(qlge, REG_SYSTEM, mask | value);
6861 
6862 	/* Set the default queue. */
6863 	value = NIC_RCV_CFG_DFQ;
6864 	mask = NIC_RCV_CFG_DFQ_MASK;
6865 
6866 	ql_write_reg(qlge, REG_NIC_RECEIVE_CONFIGURATION, mask | value);
6867 
6868 	/* Enable the MPI interrupt. */
6869 	ql_write_reg(qlge, REG_INTERRUPT_MASK, (INTR_MASK_PI << 16)
6870 	    | INTR_MASK_PI);
6871 	/* Enable the function, set pagesize, enable error checking. */
6872 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
6873 	    FSC_EC | FSC_VM_PAGE_4K | FSC_DBRST_1024;
6874 	/* Set/clear header splitting. */
6875 	if (CFG_IST(qlge, CFG_ENABLE_SPLIT_HEADER)) {
6876 		value |= FSC_SH;
6877 		ql_write_reg(qlge, REG_SPLIT_HEADER, SMALL_BUFFER_SIZE);
6878 	}
6879 	mask = FSC_VM_PAGESIZE_MASK |
6880 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
6881 	ql_write_reg(qlge, REG_FUNCTION_SPECIFIC_CONTROL, mask | value);
6882 	/*
6883 	 * check current port max frame size, if different from OS setting,
6884 	 * then we need to change
6885 	 */
6886 	qlge->max_frame_size =
6887 	    (qlge->mtu == ETHERMTU)? NORMAL_FRAME_SIZE : JUMBO_FRAME_SIZE;
6888 
6889 	mutex_enter(&qlge->mbx_mutex);
6890 	status = ql_get_port_cfg(qlge);
6891 	mutex_exit(&qlge->mbx_mutex);
6892 
6893 	if (status == DDI_SUCCESS) {
6894 		/* if current frame size is smaller than required size */
6895 		if (qlge->port_cfg_info.max_frame_size <
6896 		    qlge->max_frame_size) {
6897 			QL_PRINT(DBG_MBX,
6898 			    ("update frame size, current %d, new %d\n",
6899 			    qlge->port_cfg_info.max_frame_size,
6900 			    qlge->max_frame_size));
6901 			qlge->port_cfg_info.max_frame_size =
6902 			    qlge->max_frame_size;
6903 			qlge->port_cfg_info.link_cfg |= ENABLE_JUMBO;
6904 			update_port_config = B_TRUE;
6905 		}
6906 
6907 		if (qlge->port_cfg_info.link_cfg & STD_PAUSE)
6908 			pause = PAUSE_MODE_STANDARD;
6909 		else if (qlge->port_cfg_info.link_cfg & PP_PAUSE)
6910 			pause = PAUSE_MODE_PER_PRIORITY;
6911 
6912 		if (pause != qlge->pause) {
6913 			pause_bit_mask = 0x60;	/* bit 5-6 */
6914 			/* clear pause bits */
6915 			qlge->port_cfg_info.link_cfg &= ~pause_bit_mask;
6916 			if (qlge->pause == PAUSE_MODE_STANDARD)
6917 				qlge->port_cfg_info.link_cfg |= STD_PAUSE;
6918 			else if (qlge->pause == PAUSE_MODE_PER_PRIORITY)
6919 				qlge->port_cfg_info.link_cfg |= PP_PAUSE;
6920 			update_port_config = B_TRUE;
6921 		}
6922 
6923 		if (qlge->port_cfg_info.link_cfg & DCBX_ENABLE)
6924 			dcbx_enable = B_TRUE;
6925 		if (dcbx_enable != qlge->dcbx_enable) {
6926 			qlge->port_cfg_info.link_cfg &= ~dcbx_bit_mask;
6927 			if (qlge->dcbx_enable)
6928 				qlge->port_cfg_info.link_cfg |= DCBX_ENABLE;
6929 		}
6930 
6931 		update_port_config = B_TRUE;
6932 
6933 		/* if need to update port configuration */
6934 		if (update_port_config) {
6935 			mutex_enter(&qlge->mbx_mutex);
6936 			(void) ql_set_mpi_port_config(qlge,
6937 			    qlge->port_cfg_info);
6938 			mutex_exit(&qlge->mbx_mutex);
6939 		}
6940 	} else
6941 		cmn_err(CE_WARN, "ql_get_port_cfg failed");
6942 
6943 	/* Start up the rx queues. */
6944 	for (i = 0; i < qlge->rx_ring_count; i++) {
6945 		status = ql_start_rx_ring(qlge, &qlge->rx_ring[i]);
6946 		if (status) {
6947 			cmn_err(CE_WARN,
6948 			    "Failed to start rx ring[%d]", i);
6949 			return (status);
6950 		}
6951 	}
6952 
6953 	/*
6954 	 * If there is more than one inbound completion queue
6955 	 * then download a RICB to configure RSS.
6956 	 */
6957 	if (qlge->rss_ring_count > 1) {
6958 		status = ql_start_rss(qlge);
6959 		if (status) {
6960 			cmn_err(CE_WARN, "Failed to start RSS.");
6961 			return (status);
6962 		}
6963 	}
6964 
6965 	/* Start up the tx queues. */
6966 	for (i = 0; i < qlge->tx_ring_count; i++) {
6967 		status = ql_start_tx_ring(qlge, &qlge->tx_ring[i]);
6968 		if (status) {
6969 			cmn_err(CE_WARN,
6970 			    "Failed to start tx ring[%d]", i);
6971 			return (status);
6972 		}
6973 	}
6974 	qlge->selected_tx_ring = 0;
6975 	/* Set the frame routing filter. */
6976 	status = ql_route_initialize(qlge);
6977 	if (status) {
6978 		cmn_err(CE_WARN,
6979 		    "Failed to init CAM/Routing tables.");
6980 		return (status);
6981 	}
6982 
6983 	return (status);
6984 }
6985 /*
6986  * Issue soft reset to chip.
6987  */
6988 static int
6989 ql_asic_reset(qlge_t *qlge)
6990 {
6991 	int status = DDI_SUCCESS;
6992 
6993 	ql_write_reg(qlge, REG_RESET_FAILOVER, FUNCTION_RESET_MASK
6994 	    |FUNCTION_RESET);
6995 
6996 	if (ql_wait_reg_bit(qlge, REG_RESET_FAILOVER, FUNCTION_RESET,
6997 	    BIT_RESET, 0) != DDI_SUCCESS) {
6998 		cmn_err(CE_WARN,
6999 		    "TIMEOUT!!! errored out of resetting the chip!");
7000 		status = DDI_FAILURE;
7001 	}
7002 
7003 	return (status);
7004 }
7005 
7006 /*
7007  * If there are more than MIN_BUFFERS_ARM_COUNT small buffer descriptors in
7008  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7009  * to be used by hardware.
7010  */
7011 static void
7012 ql_arm_sbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7013 {
7014 	struct bq_desc *sbq_desc;
7015 	int i;
7016 	uint64_t *sbq_entry = rx_ring->sbq_dma.vaddr;
7017 	uint32_t arm_count;
7018 
7019 	if (rx_ring->sbuf_free_count > rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT)
7020 		arm_count = (rx_ring->sbq_len-MIN_BUFFERS_ARM_COUNT);
7021 	else {
7022 		/* Adjust to a multiple of 16 */
7023 		arm_count = (rx_ring->sbuf_free_count / 16) * 16;
7024 #ifdef QLGE_LOAD_UNLOAD
7025 		cmn_err(CE_NOTE, "adjust sbuf arm_count %d\n", arm_count);
7026 #endif
7027 	}
7028 	for (i = 0; i < arm_count; i++) {
7029 		sbq_desc = ql_get_sbuf_from_free_list(rx_ring);
7030 		if (sbq_desc == NULL)
7031 			break;
7032 		/* Arm asic */
7033 		*sbq_entry = cpu_to_le64(sbq_desc->bd_dma.dma_addr);
7034 		sbq_entry++;
7035 
7036 		/* link the descriptors to in_use_list */
7037 		ql_add_sbuf_to_in_use_list(rx_ring, sbq_desc);
7038 		rx_ring->sbq_prod_idx++;
7039 	}
7040 	ql_update_sbq_prod_idx(qlge, rx_ring);
7041 }
7042 
7043 /*
7044  * If there are more than MIN_BUFFERS_ARM_COUNT large buffer descriptors in
7045  * its free list, move xMIN_BUFFERS_ARM_COUNT descriptors to its in use list
7046  * to be used by hardware.
7047  */
7048 static void
7049 ql_arm_lbuf(qlge_t *qlge, struct rx_ring *rx_ring)
7050 {
7051 	struct bq_desc *lbq_desc;
7052 	int i;
7053 	uint64_t *lbq_entry = rx_ring->lbq_dma.vaddr;
7054 	uint32_t arm_count;
7055 
7056 	if (rx_ring->lbuf_free_count > rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT)
7057 		arm_count = (rx_ring->lbq_len-MIN_BUFFERS_ARM_COUNT);
7058 	else {
7059 		/* Adjust to a multiple of 16 */
7060 		arm_count = (rx_ring->lbuf_free_count / 16) * 16;
7061 #ifdef QLGE_LOAD_UNLOAD
7062 		cmn_err(CE_NOTE, "adjust lbuf arm_count %d\n", arm_count);
7063 #endif
7064 	}
7065 	for (i = 0; i < arm_count; i++) {
7066 		lbq_desc = ql_get_lbuf_from_free_list(rx_ring);
7067 		if (lbq_desc == NULL)
7068 			break;
7069 		/* Arm asic */
7070 		*lbq_entry = cpu_to_le64(lbq_desc->bd_dma.dma_addr);
7071 		lbq_entry++;
7072 
7073 		/* link the descriptors to in_use_list */
7074 		ql_add_lbuf_to_in_use_list(rx_ring, lbq_desc);
7075 		rx_ring->lbq_prod_idx++;
7076 	}
7077 	ql_update_lbq_prod_idx(qlge, rx_ring);
7078 }
7079 
7080 
7081 /*
7082  * Initializes the adapter by configuring request and response queues,
7083  * allocates and ARMs small and large receive buffers to the
7084  * hardware
7085  */
7086 static int
7087 ql_bringup_adapter(qlge_t *qlge)
7088 {
7089 	int i;
7090 
7091 	if (ql_device_initialize(qlge) != DDI_SUCCESS) {
7092 		cmn_err(CE_WARN, "?%s(%d): ql_device_initialize failed",
7093 		    __func__, qlge->instance);
7094 		goto err_bringup;
7095 	}
7096 	qlge->sequence |= INIT_ADAPTER_UP;
7097 
7098 #ifdef QLGE_TRACK_BUFFER_USAGE
7099 	for (i = 0; i < qlge->rx_ring_count; i++) {
7100 		if (qlge->rx_ring[i].type != TX_Q) {
7101 			qlge->rx_sb_low_count[i] = NUM_SMALL_BUFFERS;
7102 			qlge->rx_lb_low_count[i] = NUM_LARGE_BUFFERS;
7103 		}
7104 		qlge->cq_low_count[i] = NUM_RX_RING_ENTRIES;
7105 	}
7106 #endif
7107 	/* Arm buffers */
7108 	for (i = 0; i < qlge->rx_ring_count; i++) {
7109 		if (qlge->rx_ring[i].type != TX_Q) {
7110 			ql_arm_sbuf(qlge, &qlge->rx_ring[i]);
7111 			ql_arm_lbuf(qlge, &qlge->rx_ring[i]);
7112 		}
7113 	}
7114 
7115 	/* Enable work/request queues */
7116 	for (i = 0; i < qlge->tx_ring_count; i++) {
7117 		if (qlge->tx_ring[i].valid_db_reg)
7118 			ql_write_doorbell_reg(qlge,
7119 			    qlge->tx_ring[i].valid_db_reg,
7120 			    REQ_Q_VALID);
7121 	}
7122 
7123 	/* Enable completion queues */
7124 	for (i = 0; i < qlge->rx_ring_count; i++) {
7125 		if (qlge->rx_ring[i].valid_db_reg)
7126 			ql_write_doorbell_reg(qlge,
7127 			    qlge->rx_ring[i].valid_db_reg,
7128 			    RSP_Q_VALID);
7129 	}
7130 
7131 	for (i = 0; i < qlge->tx_ring_count; i++) {
7132 		mutex_enter(&qlge->tx_ring[i].tx_lock);
7133 		qlge->tx_ring[i].mac_flags = QL_MAC_STARTED;
7134 		mutex_exit(&qlge->tx_ring[i].tx_lock);
7135 	}
7136 
7137 	for (i = 0; i < qlge->rx_ring_count; i++) {
7138 		mutex_enter(&qlge->rx_ring[i].rx_lock);
7139 		qlge->rx_ring[i].mac_flags = QL_MAC_STARTED;
7140 		mutex_exit(&qlge->rx_ring[i].rx_lock);
7141 	}
7142 
7143 	/* This mutex will get re-acquired in enable_completion interrupt */
7144 	mutex_exit(&qlge->hw_mutex);
7145 	/* Traffic can start flowing now */
7146 	ql_enable_all_completion_interrupts(qlge);
7147 	mutex_enter(&qlge->hw_mutex);
7148 
7149 	ql_enable_global_interrupt(qlge);
7150 
7151 	qlge->sequence |= ADAPTER_INIT;
7152 	return (DDI_SUCCESS);
7153 
7154 err_bringup:
7155 	(void) ql_asic_reset(qlge);
7156 	return (DDI_FAILURE);
7157 }
7158 
7159 /*
7160  * Initialize mutexes of each rx/tx rings
7161  */
7162 static int
7163 ql_init_rx_tx_locks(qlge_t *qlge)
7164 {
7165 	struct tx_ring *tx_ring;
7166 	struct rx_ring *rx_ring;
7167 	int i;
7168 
7169 	for (i = 0; i < qlge->tx_ring_count; i++) {
7170 		tx_ring = &qlge->tx_ring[i];
7171 		mutex_init(&tx_ring->tx_lock, NULL, MUTEX_DRIVER,
7172 		    DDI_INTR_PRI(qlge->intr_pri));
7173 	}
7174 
7175 	for (i = 0; i < qlge->rx_ring_count; i++) {
7176 		rx_ring = &qlge->rx_ring[i];
7177 		mutex_init(&rx_ring->rx_lock, NULL, MUTEX_DRIVER,
7178 		    DDI_INTR_PRI(qlge->intr_pri));
7179 		mutex_init(&rx_ring->sbq_lock, NULL, MUTEX_DRIVER,
7180 		    DDI_INTR_PRI(qlge->intr_pri));
7181 		mutex_init(&rx_ring->lbq_lock, NULL, MUTEX_DRIVER,
7182 		    DDI_INTR_PRI(qlge->intr_pri));
7183 	}
7184 
7185 	return (DDI_SUCCESS);
7186 }
7187 
7188 /*ARGSUSED*/
7189 /*
7190  * Simply call pci_ereport_post which generates ereports for errors
7191  * that occur in the PCI local bus configuration status registers.
7192  */
7193 static int
7194 ql_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7195 {
7196 	pci_ereport_post(dip, err, NULL);
7197 	return (err->fme_status);
7198 }
7199 
7200 static void
7201 ql_fm_init(qlge_t *qlge)
7202 {
7203 	ddi_iblock_cookie_t iblk;
7204 
7205 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) entered, FMA capability %x\n",
7206 	    qlge->instance, qlge->fm_capabilities));
7207 	/*
7208 	 * Register capabilities with IO Fault Services. The capabilities
	 * set above may not be supported by the parent nexus, in which
	 * case some capability bits may be cleared.
7211 	 */
7212 	if (qlge->fm_capabilities)
7213 		ddi_fm_init(qlge->dip, &qlge->fm_capabilities, &iblk);
7214 
7215 	/*
7216 	 * Initialize pci ereport capabilities if ereport capable
7217 	 */
7218 	if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7219 	    DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7220 		pci_ereport_setup(qlge->dip);
7221 	}
7222 
7223 	/* Register error callback if error callback capable */
7224 	if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities)) {
7225 		ddi_fm_handler_register(qlge->dip,
7226 		    ql_fm_error_cb, (void*) qlge);
7227 	}
7228 
7229 	/*
	 * DDI_FLAGERR_ACC indicates:
	 *  - the driver will check its access handle(s) for faults on
	 *    a regular basis by calling ddi_fm_acc_err_get
	 *  - the driver is able to cope with incorrect results of I/O
	 *    operations resulting from an I/O fault
7235 	 */
7236 	if (DDI_FM_ACC_ERR_CAP(qlge->fm_capabilities)) {
7237 		ql_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7238 	}
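	/*
	 * A minimal sketch of the periodic access-handle check implied
	 * above (the handle and the recovery policy are illustrative):
	 *
	 *	ddi_fm_error_t de;
	 *
	 *	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	 *	if (de.fme_status != DDI_FM_OK)
	 *		ddi_fm_service_impact(qlge->dip, DDI_SERVICE_DEGRADED);
	 */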
7239 
7240 	/*
7241 	 * DDI_DMA_FLAGERR indicates:
	 *  - the driver will check its DMA handle(s) for faults on a
	 *    regular basis using ddi_fm_dma_err_get
	 *  - the driver is able to cope with incorrect results of DMA
	 *    operations resulting from an I/O fault
7246 	 */
7247 	if (DDI_FM_DMA_ERR_CAP(qlge->fm_capabilities)) {
7248 		tx_mapping_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7249 		dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7250 	}
7251 	QL_PRINT(DBG_INIT, ("ql_fm_init(%d) done\n",
7252 	    qlge->instance));
7253 }
7254 
7255 static void
7256 ql_fm_fini(qlge_t *qlge)
7257 {
7258 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) entered\n",
7259 	    qlge->instance));
7260 	/* Only unregister FMA capabilities if we registered some */
7261 	if (qlge->fm_capabilities) {
7262 
7263 		/*
7264 		 * Release any resources allocated by pci_ereport_setup()
7265 		 */
7266 		if (DDI_FM_EREPORT_CAP(qlge->fm_capabilities) ||
7267 		    DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7268 			pci_ereport_teardown(qlge->dip);
7269 
7270 		/*
7271 		 * Un-register error callback if error callback capable
7272 		 */
7273 		if (DDI_FM_ERRCB_CAP(qlge->fm_capabilities))
7274 			ddi_fm_handler_unregister(qlge->dip);
7275 
7276 		/* Unregister from IO Fault Services */
7277 		ddi_fm_fini(qlge->dip);
7278 	}
7279 	QL_PRINT(DBG_INIT, ("ql_fm_fini(%d) done\n",
7280 	    qlge->instance));
7281 }
7282 /*
7283  * ql_attach - Driver attach.
7284  */
7285 static int
7286 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
7287 {
7288 	int instance;
7289 	qlge_t *qlge = NULL;
7290 	int rval;
7291 	uint16_t w;
7292 	mac_register_t *macp = NULL;
7293 	uint32_t data;
7294 
7295 	rval = DDI_FAILURE;
7296 
7297 	/* first get the instance */
7298 	instance = ddi_get_instance(dip);
7299 
7300 	switch (cmd) {
7301 	case DDI_ATTACH:
7302 		/*
7303 		 * Allocate our per-device-instance structure
7304 		 */
7305 		qlge = (qlge_t *)kmem_zalloc(sizeof (*qlge), KM_SLEEP);
7306 		ASSERT(qlge != NULL);
7307 		qlge->sequence |= INIT_SOFTSTATE_ALLOC;
7308 
7309 		qlge->dip = dip;
7310 		qlge->instance = instance;
		/* Set the default debug-print mask. */
7312 		qlge->ql_dbgprnt = 0;
7313 #if QL_DEBUG
7314 		qlge->ql_dbgprnt = QL_DEBUG;
7315 #endif /* QL_DEBUG */
7316 
7317 		/*
7318 		 * Initialize for fma support
7319 		 */
7320 		/* fault management (fm) capabilities. */
7321 		qlge->fm_capabilities =
7322 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE;
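		/*
		 * A driver/device "fm-capable" property in the range
		 * 0x0 - 0xf, if present, overrides the default ereport +
		 * error-callback capability mask set above.
		 */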
7323 		data = ql_get_prop(qlge, "fm-capable");
7324 		if (data <= 0xf) {
7325 			qlge->fm_capabilities = data;
7326 		}
7327 		ql_fm_init(qlge);
7328 		qlge->sequence |= INIT_FM;
7329 		QL_PRINT(DBG_INIT, ("ql_attach(%d): fma init done\n",
7330 		    qlge->instance));
7331 
7332 		/*
7333 		 * Setup the ISP8x00 registers address mapping to be
7334 		 * accessed by this particular driver.
7335 		 * 0x0   Configuration Space
7336 		 * 0x1   I/O Space
7337 		 * 0x2   1st Memory Space address - Control Register Set
7338 		 * 0x3   2nd Memory Space address - Doorbell Memory Space
7339 		 */
7340 		w = 2;
7341 		if (ddi_regs_map_setup(dip, w, (caddr_t *)&qlge->iobase, 0,
7342 		    sizeof (dev_reg_t), &ql_dev_acc_attr,
7343 		    &qlge->dev_handle) != DDI_SUCCESS) {
7344 			cmn_err(CE_WARN, "%s(%d): Unable to map device "
7345 			    "registers", ADAPTER_NAME, instance);
7346 			break;
7347 		}
7348 		QL_PRINT(DBG_GLD, ("ql_attach: I/O base = 0x%x\n",
7349 		    qlge->iobase));
7350 		qlge->sequence |= INIT_REGS_SETUP;
7351 
7352 		/* map Doorbell memory space */
7353 		w = 3;
7354 		if (ddi_regs_map_setup(dip, w,
7355 		    (caddr_t *)&qlge->doorbell_reg_iobase, 0,
7356 		    0x100000 /* sizeof (dev_doorbell_reg_t) */,
7357 		    &ql_dev_acc_attr,
7358 		    &qlge->dev_doorbell_reg_handle) != DDI_SUCCESS) {
7359 			cmn_err(CE_WARN, "%s(%d): Unable to map Doorbell "
7360 			    "registers",
7361 			    ADAPTER_NAME, instance);
7362 			break;
7363 		}
7364 		QL_PRINT(DBG_GLD, ("ql_attach: Doorbell I/O base = 0x%x\n",
7365 		    qlge->doorbell_reg_iobase));
7366 		qlge->sequence |= INIT_DOORBELL_REGS_SETUP;
7367 
7368 		/*
7369 		 * Allocate a macinfo structure for this instance
7370 		 */
7371 		if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
7372 			cmn_err(CE_WARN, "%s(%d): mac_alloc failed",
7373 			    __func__, instance);
7374 			break;
7375 		}
		/* save the adapter state pointer in the dip private data */
7377 		ddi_set_driver_private(dip, qlge);
7378 		QL_PRINT(DBG_INIT, ("%s(%d): Allocate macinfo structure done\n",
7379 		    ADAPTER_NAME, instance));
7380 		qlge->sequence |= INIT_MAC_ALLOC;
7381 
7382 		/*
7383 		 * Attach this instance of the device
7384 		 */
7385 		/* Setup PCI Local Bus Configuration resource. */
7386 		if (pci_config_setup(dip, &qlge->pci_handle) != DDI_SUCCESS) {
7387 			cmn_err(CE_WARN, "%s(%d):Unable to get PCI resources",
7388 			    ADAPTER_NAME, instance);
7389 			if (qlge->fm_enable) {
7390 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7391 				ddi_fm_service_impact(qlge->dip,
7392 				    DDI_SERVICE_LOST);
7393 			}
7394 			break;
7395 		}
7396 		qlge->sequence |= INIT_PCI_CONFIG_SETUP;
7397 		QL_PRINT(DBG_GLD, ("ql_attach(%d): pci_config_setup done\n",
7398 		    instance));
7399 
7400 		if (ql_init_instance(qlge) != DDI_SUCCESS) {
7401 			cmn_err(CE_WARN, "%s(%d): Unable to initialize device "
7402 			    "instance", ADAPTER_NAME, instance);
7403 			if (qlge->fm_enable) {
7404 				ql_fm_ereport(qlge, DDI_FM_DEVICE_INVAL_STATE);
7405 				ddi_fm_service_impact(qlge->dip,
7406 				    DDI_SERVICE_LOST);
7407 			}
7408 			break;
7409 		}
7410 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_init_instance done\n",
7411 		    instance));
7412 
7413 		/* Setup interrupt vectors */
7414 		if (ql_alloc_irqs(qlge) != DDI_SUCCESS) {
7415 			break;
7416 		}
7417 		qlge->sequence |= INIT_INTR_ALLOC;
7418 		QL_PRINT(DBG_GLD, ("ql_attach(%d): ql_alloc_irqs done\n",
7419 		    instance));
7420 
7421 		/* Configure queues */
7422 		if (ql_setup_rings(qlge) != DDI_SUCCESS) {
7423 			break;
7424 		}
7425 		qlge->sequence |= INIT_SETUP_RINGS;
7426 		QL_PRINT(DBG_GLD, ("ql_attach(%d): setup rings done\n",
7427 		    instance));
7428 
7429 		/*
7430 		 * Allocate memory resources
7431 		 */
7432 		if (ql_alloc_mem_resources(qlge) != DDI_SUCCESS) {
7433 			cmn_err(CE_WARN, "%s(%d): memory allocation failed",
7434 			    __func__, qlge->instance);
7435 			break;
7436 		}
7437 		qlge->sequence |= INIT_MEMORY_ALLOC;
7438 		QL_PRINT(DBG_GLD, ("ql_alloc_mem_resources(%d) done\n",
7439 		    instance));
7440 
7441 		/*
7442 		 * Map queues to interrupt vectors
7443 		 */
7444 		ql_resolve_queues_to_irqs(qlge);
7445 
7446 		/* Initialize mutex, need the interrupt priority */
7447 		(void) ql_init_rx_tx_locks(qlge);
7448 		qlge->sequence |= INIT_LOCKS_CREATED;
7449 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_rx_tx_locks done\n",
7450 		    ADAPTER_NAME, instance));
7451 
7452 		/*
		 * Use soft interrupts for work (MPI events, MPI resets and
		 * ASIC resets) that we do not want to do in the regular
		 * network functions or while mutexes are held
7455 		 */
7456 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_event_intr_hdl,
7457 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_event_work, (caddr_t)qlge)
7458 		    != DDI_SUCCESS) {
7459 			break;
7460 		}
7461 
7462 		if (ddi_intr_add_softint(qlge->dip, &qlge->asic_reset_intr_hdl,
7463 		    DDI_INTR_SOFTPRI_MIN, ql_asic_reset_work, (caddr_t)qlge)
7464 		    != DDI_SUCCESS) {
7465 			break;
7466 		}
7467 
7468 		if (ddi_intr_add_softint(qlge->dip, &qlge->mpi_reset_intr_hdl,
7469 		    DDI_INTR_SOFTPRI_MIN, ql_mpi_reset_work, (caddr_t)qlge)
7470 		    != DDI_SUCCESS) {
7471 			break;
7472 		}
7473 		qlge->sequence |= INIT_ADD_SOFT_INTERRUPT;
7474 		QL_PRINT(DBG_INIT, ("%s(%d): ddi_intr_add_softint done\n",
7475 		    ADAPTER_NAME, instance));
7476 
7477 		/*
		 * Mutexes to protect the adapter state structure;
		 * initialize them according to the interrupt priority
7480 		 */
7481 		mutex_init(&qlge->gen_mutex, NULL, MUTEX_DRIVER,
7482 		    DDI_INTR_PRI(qlge->intr_pri));
7483 		mutex_init(&qlge->hw_mutex, NULL, MUTEX_DRIVER,
7484 		    DDI_INTR_PRI(qlge->intr_pri));
7485 		mutex_init(&qlge->mbx_mutex, NULL, MUTEX_DRIVER,
7486 		    DDI_INTR_PRI(qlge->intr_pri));
7487 
		/* Mailbox wait and interrupt condition variable. */
7489 		cv_init(&qlge->cv_mbx_intr, NULL, CV_DRIVER, NULL);
7490 		qlge->sequence |= INIT_MUTEX;
7491 		QL_PRINT(DBG_INIT, ("%s(%d): mutex_init done\n",
7492 		    ADAPTER_NAME, instance));
7493 
7494 		/*
7495 		 * KStats
7496 		 */
7497 		if (ql_init_kstats(qlge) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): kstat initialization failed",
7499 			    ADAPTER_NAME, instance);
7500 			break;
7501 		}
7502 		qlge->sequence |= INIT_KSTATS;
7503 		QL_PRINT(DBG_INIT, ("%s(%d): ql_init_kstats done\n",
7504 		    ADAPTER_NAME, instance));
7505 
7506 		/*
7507 		 * Initialize gld macinfo structure
7508 		 */
7509 		ql_gld3_init(qlge, macp);
7510 		/*
7511 		 * Add interrupt handlers
7512 		 */
7513 		if (ql_add_intr_handlers(qlge) != DDI_SUCCESS) {
7514 			cmn_err(CE_WARN, "Failed to add interrupt "
7515 			    "handlers");
7516 			break;
7517 		}
7518 		qlge->sequence |= INIT_ADD_INTERRUPT;
7519 		QL_PRINT(DBG_INIT, ("%s(%d): Add interrupt handler done\n",
7520 		    ADAPTER_NAME, instance));
7521 
7522 		/*
7523 		 * MAC Register
7524 		 */
7525 		if (mac_register(macp, &qlge->mh) != DDI_SUCCESS) {
7526 			cmn_err(CE_WARN, "%s(%d): mac_register failed",
7527 			    __func__, instance);
7528 			break;
7529 		}
7530 		qlge->sequence |= INIT_MAC_REGISTERED;
7531 		QL_PRINT(DBG_GLD, ("%s(%d): mac_register done\n",
7532 		    ADAPTER_NAME, instance));
7533 
7534 		mac_free(macp);
7535 		macp = NULL;
7536 
7537 		qlge->mac_flags = QL_MAC_ATTACHED;
7538 
7539 		ddi_report_dev(dip);
7540 
7541 		rval = DDI_SUCCESS;
7542 
7543 	break;
/*
 * DDI_RESUME
 * When called with cmd set to DDI_RESUME, attach() must restore the
 * hardware state of a device (power may have been removed from the
 * device), allow pending requests to continue, and service new
 * requests. In this case, the driver must not make any assumptions
 * about the state of the hardware, but must restore the state of the
 * device except for the power level of components.
 */
7555 	case DDI_RESUME:
7556 
7557 		if ((qlge = (qlge_t *)QL_GET_DEV(dip)) == NULL)
7558 			return (DDI_FAILURE);
7559 
7560 		QL_PRINT(DBG_GLD, ("%s(%d)-DDI_RESUME\n",
7561 		    __func__, qlge->instance));
7562 
7563 		mutex_enter(&qlge->gen_mutex);
7564 		rval = ql_do_start(qlge);
7565 		mutex_exit(&qlge->gen_mutex);
7566 		break;
7567 
7568 	default:
7569 		break;
7570 	}
7571 
7572 	/* if failed to attach */
7573 	if ((cmd == DDI_ATTACH) && (rval != DDI_SUCCESS) && (qlge != NULL)) {
7574 		cmn_err(CE_WARN, "qlge driver attach failed, sequence %x",
7575 		    qlge->sequence);
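		/*
		 * qlge->sequence recorded each attach step as it completed,
		 * so ql_free_resources() can presumably unwind only the
		 * steps that actually succeeded.
		 */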
7576 		ql_free_resources(qlge);
7577 	}
7578 
7579 	return (rval);
7580 }
7581 
7582 /*
7583  * Unbind all pending tx dma handles during driver bring down
7584  */
7585 static void
7586 ql_unbind_pending_tx_dma_handle(struct tx_ring *tx_ring)
7587 {
7588 	struct tx_ring_desc *tx_ring_desc;
7589 	int i, j;
7590 
7591 	if (tx_ring->wq_desc) {
7592 		tx_ring_desc = tx_ring->wq_desc;
7593 		for (i = 0; i < tx_ring->wq_len; i++, tx_ring_desc++) {
7594 			for (j = 0; j < tx_ring_desc->tx_dma_handle_used; j++) {
7595 				if (tx_ring_desc->tx_dma_handle[j]) {
7596 					(void) ddi_dma_unbind_handle(
7597 					    tx_ring_desc->tx_dma_handle[j]);
7598 				}
7599 			}
7600 			tx_ring_desc->tx_dma_handle_used = 0;
7601 		} /* end of for loop */
7602 	}
7603 }
7604 /*
 * Wait for all the packets sent to the chip to finish transmission,
 * to prevent buffers from being unmapped before or during a transmit
 * operation
7607  */
7608 static int
7609 ql_wait_tx_quiesce(qlge_t *qlge)
7610 {
7611 	int count = MAX_TX_WAIT_COUNT, i;
7612 	int rings_done;
7613 	volatile struct tx_ring *tx_ring;
7614 	uint32_t consumer_idx;
7615 	uint32_t producer_idx;
7616 	uint32_t temp;
7617 	int done = 0;
7618 	int rval = DDI_FAILURE;
7619 
7620 	while (!done) {
7621 		rings_done = 0;
7622 
7623 		for (i = 0; i < qlge->tx_ring_count; i++) {
7624 			tx_ring = &qlge->tx_ring[i];
7625 			temp = ql_read_doorbell_reg(qlge,
7626 			    tx_ring->prod_idx_db_reg);
7627 			producer_idx = temp & 0x0000ffff;
7628 			consumer_idx = (temp >> 16);
7629 
7630 			if (qlge->isr_stride) {
7631 				struct rx_ring *ob_ring;
7632 				ob_ring = &qlge->rx_ring[tx_ring->cq_id];
7633 				if (producer_idx != ob_ring->cnsmr_idx) {
7634 					cmn_err(CE_NOTE, " force clean \n");
7635 					(void) ql_clean_outbound_rx_ring(
7636 					    ob_ring);
7637 				}
7638 			}
7639 			/*
			 * Get the pending iocb count, i.e. those which have
			 * not yet been pulled down by the chip
7642 			 */
7643 			if (producer_idx >= consumer_idx)
7644 				temp = (producer_idx - consumer_idx);
7645 			else
7646 				temp = (tx_ring->wq_len - consumer_idx) +
7647 				    producer_idx;
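			/*
			 * temp now holds the in-flight IOCB count, with the
			 * producer index wrap at wq_len accounted for.  A
			 * ring is treated as quiesced once this count plus
			 * the free-descriptor count covers the whole queue.
			 */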
7648 
7649 			if ((tx_ring->tx_free_count + temp) >= tx_ring->wq_len)
7650 				rings_done++;
			else {
				/* this ring is still busy; retry below */
				break;
			}
7655 		}
7656 
7657 		/* If all the rings are done */
7658 		if (rings_done >= qlge->tx_ring_count) {
7659 #ifdef QLGE_LOAD_UNLOAD
7660 			cmn_err(CE_NOTE, "%s(%d) done successfully \n",
7661 			    __func__, qlge->instance);
7662 #endif
7663 			rval = DDI_SUCCESS;
7664 			break;
7665 		}
7666 
7667 		qlge_delay(100);
7668 
7669 		count--;
7670 		if (!count) {
7671 
7672 			count = MAX_TX_WAIT_COUNT;
7673 #ifdef QLGE_LOAD_UNLOAD
7674 			volatile struct rx_ring *rx_ring;
7675 			cmn_err(CE_NOTE, "%s(%d): Waiting for %d pending"
			    " transmits on queue %d to complete.\n",
7677 			    __func__, qlge->instance,
7678 			    (qlge->tx_ring[i].wq_len -
7679 			    qlge->tx_ring[i].tx_free_count),
7680 			    i);
7681 
7682 			rx_ring = &qlge->rx_ring[i+1];
7683 			temp = ql_read_doorbell_reg(qlge,
7684 			    rx_ring->cnsmr_idx_db_reg);
7685 			consumer_idx = temp & 0x0000ffff;
7686 			producer_idx = (temp >> 16);
7687 			cmn_err(CE_NOTE, "%s(%d): Transmit completion queue %d,"
7688 			    " Producer %d, Consumer %d\n",
7689 			    __func__, qlge->instance,
7690 			    i+1,
7691 			    producer_idx, consumer_idx);
7692 
7693 			temp = ql_read_doorbell_reg(qlge,
7694 			    tx_ring->prod_idx_db_reg);
7695 			producer_idx = temp & 0x0000ffff;
7696 			consumer_idx = (temp >> 16);
7697 			cmn_err(CE_NOTE, "%s(%d): Transmit request queue %d,"
7698 			    " Producer %d, Consumer %d\n",
7699 			    __func__, qlge->instance, i,
7700 			    producer_idx, consumer_idx);
7701 #endif
7702 
7703 			/* For now move on */
7704 			break;
7705 		}
7706 	}
7707 	/* Stop the request queue */
7708 	mutex_enter(&qlge->hw_mutex);
7709 	for (i = 0; i < qlge->tx_ring_count; i++) {
7710 		if (qlge->tx_ring[i].valid_db_reg) {
7711 			ql_write_doorbell_reg(qlge,
7712 			    qlge->tx_ring[i].valid_db_reg, 0);
7713 		}
7714 	}
7715 	mutex_exit(&qlge->hw_mutex);
7716 	return (rval);
7717 }
7718 
7719 /*
 * Wait for all the receive buffers indicated up to the stack to be returned
7721  */
7722 static int
7723 ql_wait_rx_complete(qlge_t *qlge)
7724 {
7725 	int i;
7726 	/* Disable all the completion queues */
7727 	mutex_enter(&qlge->hw_mutex);
7728 	for (i = 0; i < qlge->rx_ring_count; i++) {
7729 		if (qlge->rx_ring[i].valid_db_reg) {
7730 			ql_write_doorbell_reg(qlge,
7731 			    qlge->rx_ring[i].valid_db_reg, 0);
7732 		}
7733 	}
7734 	mutex_exit(&qlge->hw_mutex);
7735 
7736 	/* Wait for OS to return all rx buffers */
7737 	qlge_delay(QL_ONE_SEC_DELAY);
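	/*
	 * Note: this is a fixed one-second grace period, not a positive
	 * confirmation that the stack has returned every buffer.
	 */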
7738 	return (DDI_SUCCESS);
7739 }
7740 
7741 /*
7742  * stop the driver
7743  */
7744 static int
7745 ql_bringdown_adapter(qlge_t *qlge)
7746 {
7747 	int i;
7748 	int status = DDI_SUCCESS;
7749 
7750 	qlge->mac_flags = QL_MAC_BRINGDOWN;
7751 	if (qlge->sequence & ADAPTER_INIT) {
7752 		/* stop forwarding external packets to driver */
7753 		status = ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7754 		if (status)
7755 			return (status);
7756 		(void) ql_stop_routing(qlge);
7757 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7758 		/*
7759 		 * Set the flag for receive and transmit
7760 		 * operations to cease
7761 		 */
7762 		for (i = 0; i < qlge->tx_ring_count; i++) {
7763 			mutex_enter(&qlge->tx_ring[i].tx_lock);
7764 			qlge->tx_ring[i].mac_flags = QL_MAC_STOPPED;
7765 			mutex_exit(&qlge->tx_ring[i].tx_lock);
7766 		}
7767 
7768 		for (i = 0; i < qlge->rx_ring_count; i++) {
7769 			mutex_enter(&qlge->rx_ring[i].rx_lock);
7770 			qlge->rx_ring[i].mac_flags = QL_MAC_STOPPED;
7771 			mutex_exit(&qlge->rx_ring[i].rx_lock);
7772 		}
7773 
7774 		/*
7775 		 * Need interrupts to be running while the transmit
7776 		 * completions are cleared. Wait for the packets
7777 		 * queued to the chip to be sent out
7778 		 */
7779 		(void) ql_wait_tx_quiesce(qlge);
7780 		/* Interrupts not needed from now */
7781 		ql_disable_all_completion_interrupts(qlge);
7782 
7783 		mutex_enter(&qlge->hw_mutex);
7784 		/* Disable Global interrupt */
7785 		ql_disable_global_interrupt(qlge);
7786 		mutex_exit(&qlge->hw_mutex);
7787 
7788 		/* Wait for all the indicated packets to come back */
7789 		status = ql_wait_rx_complete(qlge);
7790 
7791 		mutex_enter(&qlge->hw_mutex);
7792 		/* Reset adapter */
7793 		(void) ql_asic_reset(qlge);
7794 		/*
7795 		 * Unbind all tx dma handles to prevent pending tx descriptors'
7796 		 * dma handles from being re-used.
7797 		 */
7798 		for (i = 0; i < qlge->tx_ring_count; i++) {
7799 			ql_unbind_pending_tx_dma_handle(&qlge->tx_ring[i]);
7800 		}
7801 
7802 		qlge->sequence &= ~ADAPTER_INIT;
7803 
7804 		mutex_exit(&qlge->hw_mutex);
7805 	}
7806 	return (status);
7807 }
7808 
7809 /*
7810  * ql_detach
 * Used to remove all the state associated with a given
 * instance of a device node prior to the removal of that
7813  * instance from the system.
7814  */
7815 static int
7816 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
7817 {
7818 	qlge_t *qlge;
7819 	int rval;
7820 
7821 	rval = DDI_SUCCESS;
7822 
7823 	switch (cmd) {
7824 	case DDI_DETACH:
7825 
7826 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7827 			return (DDI_FAILURE);
7828 		rval = ql_bringdown_adapter(qlge);
7829 		if (rval != DDI_SUCCESS)
7830 			break;
7831 
7832 		qlge->mac_flags = QL_MAC_DETACH;
7833 
7834 		/* free memory resources */
7835 		if (qlge->sequence & INIT_MEMORY_ALLOC) {
7836 			ql_free_mem_resources(qlge);
7837 			qlge->sequence &= ~INIT_MEMORY_ALLOC;
7838 		}
7839 		ql_free_resources(qlge);
7840 
7841 		break;
7842 
7843 	case DDI_SUSPEND:
7844 		if ((qlge = QL_GET_DEV(dip)) == NULL)
7845 			return (DDI_FAILURE);
7846 
7847 		mutex_enter(&qlge->gen_mutex);
7848 		if ((qlge->mac_flags == QL_MAC_ATTACHED) ||
7849 		    (qlge->mac_flags == QL_MAC_STARTED)) {
7850 			(void) ql_do_stop(qlge);
7851 		}
7852 		qlge->mac_flags = QL_MAC_SUSPENDED;
7853 		mutex_exit(&qlge->gen_mutex);
7854 
7855 		break;
7856 	default:
7857 		rval = DDI_FAILURE;
7858 		break;
7859 	}
7860 
7861 	return (rval);
7862 }
7863 
7864 /*
7865  * quiesce(9E) entry point.
7866  *
7867  * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
7870  *
7871  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
7872  */
7873 int
7874 ql_quiesce(dev_info_t *dip)
7875 {
7876 	qlge_t *qlge;
7877 	int i;
7878 
7879 	if ((qlge = QL_GET_DEV(dip)) == NULL)
7880 		return (DDI_FAILURE);
7881 
7882 	if (CFG_IST(qlge, CFG_CHIP_8100)) {
7883 		/* stop forwarding external packets to driver */
7884 		(void) ql_sem_spinlock(qlge, SEM_RT_IDX_MASK);
7885 		(void) ql_stop_routing(qlge);
7886 		ql_sem_unlock(qlge, SEM_RT_IDX_MASK);
7887 		/* Stop all the request queues */
7888 		for (i = 0; i < qlge->tx_ring_count; i++) {
7889 			if (qlge->tx_ring[i].valid_db_reg) {
7890 				ql_write_doorbell_reg(qlge,
7891 				    qlge->tx_ring[i].valid_db_reg, 0);
7892 			}
7893 		}
7894 		qlge_delay(QL_ONE_SEC_DELAY/4);
7895 		/* Interrupts not needed from now */
7896 		/* Disable MPI interrupt */
7897 		ql_write_reg(qlge, REG_INTERRUPT_MASK,
7898 		    (INTR_MASK_PI << 16));
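		/*
		 * The upper 16 bits of REG_INTERRUPT_MASK appear to act as
		 * a write-enable mask, so writing only INTR_MASK_PI << 16
		 * clears the MPI interrupt enable bit (an assumption based
		 * on the usage here, not on the chip documentation).
		 */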
7899 		ql_disable_global_interrupt(qlge);
7900 
7901 		/* Disable all the rx completion queues */
7902 		for (i = 0; i < qlge->rx_ring_count; i++) {
7903 			if (qlge->rx_ring[i].valid_db_reg) {
7904 				ql_write_doorbell_reg(qlge,
7905 				    qlge->rx_ring[i].valid_db_reg, 0);
7906 			}
7907 		}
7908 		qlge_delay(QL_ONE_SEC_DELAY/4);
7909 		qlge->mac_flags = QL_MAC_STOPPED;
7910 		/* Reset adapter */
7911 		(void) ql_asic_reset(qlge);
7912 		qlge_delay(100);
7913 	}
7914 
7915 	return (DDI_SUCCESS);
7916 }
7917 
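/*
 * QL_STREAM_OPS presumably expands to a DDI_DEFINE_STREAM_OPS(9F)-style
 * dev_ops definition in qlge.h, wiring ql_attach, ql_detach and (it is
 * assumed) ql_quiesce into the ql_ops structure used by modldrv below.
 */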
7918 QL_STREAM_OPS(ql_ops, ql_attach, ql_detach);
7919 
7920 /*
7921  * Loadable Driver Interface Structures.
7922  * Declare and initialize the module configuration section...
7923  */
7924 static struct modldrv modldrv = {
7925 	&mod_driverops,		/* type of module: driver */
7926 	version,		/* name of module */
7927 	&ql_ops			/* driver dev_ops */
7928 };
7929 
7930 static struct modlinkage modlinkage = {
7931 	MODREV_1, 	&modldrv,	NULL
7932 };
7933 
7934 /*
7935  * Loadable Module Routines
7936  */
7937 
7938 /*
7939  * _init
7940  * Initializes a loadable module. It is called before any other
7941  * routine in a loadable module.
7942  */
7943 int
7944 _init(void)
7945 {
7946 	int rval;
7947 
7948 	mac_init_ops(&ql_ops, ADAPTER_NAME);
7949 	rval = mod_install(&modlinkage);
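	/*
	 * mod_install() returns 0 on success and DDI_SUCCESS is also 0,
	 * so comparing the two below works even though they come from
	 * different return-code families.
	 */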
7950 	if (rval != DDI_SUCCESS) {
7951 		mac_fini_ops(&ql_ops);
7952 		cmn_err(CE_WARN, "?Unable to install/attach driver '%s'",
7953 		    ADAPTER_NAME);
7954 	}
7955 
7956 	return (rval);
7957 }
7958 
7959 /*
7960  * _fini
7961  * Prepares a module for unloading. It is called when the system
7962  * wants to unload a module. If the module determines that it can
7963  * be unloaded, then _fini() returns the value returned by
7964  * mod_remove(). Upon successful return from _fini() no other
7965  * routine in the module will be called before _init() is called.
7966  */
7967 int
7968 _fini(void)
7969 {
7970 	int rval;
7971 
7972 	rval = mod_remove(&modlinkage);
7973 	if (rval == DDI_SUCCESS) {
7974 		mac_fini_ops(&ql_ops);
7975 	}
7976 
7977 	return (rval);
7978 }
7979 
7980 /*
7981  * _info
7982  * Returns information about loadable module.
7983  */
7984 int
7985 _info(struct modinfo *modinfop)
7986 {
7987 	return (mod_info(&modlinkage, modinfop));
7988 }
7989