1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006-2014 QLogic Corporation
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32/*
33 * The following controllers are supported by this driver:
34 *   BCM5706C A2, A3
35 *   BCM5706S A2, A3
36 *   BCM5708C B1, B2
37 *   BCM5708S B1, B2
38 *   BCM5709C A1, C0
39 *   BCM5709S A1, C0
40 *   BCM5716C C0
41 *   BCM5716S C0
42 *
43 * The following controllers are not supported by this driver:
44 *   BCM5706C A0, A1 (pre-production)
45 *   BCM5706S A0, A1 (pre-production)
46 *   BCM5708C A0, B0 (pre-production)
47 *   BCM5708S A0, B0 (pre-production)
48 *   BCM5709C A0, B0, B1, B2 (pre-production)
49 *   BCM5709S A0, B0, B1, B2 (pre-production)
50 */
51
52#include "opt_bce.h"
53
54#include <sys/param.h>
55#include <sys/endian.h>
56#include <sys/systm.h>
57#include <sys/sockio.h>
58#include <sys/lock.h>
59#include <sys/mbuf.h>
60#include <sys/malloc.h>
61#include <sys/mutex.h>
62#include <sys/kernel.h>
63#include <sys/module.h>
64#include <sys/socket.h>
65#include <sys/sysctl.h>
66#include <sys/queue.h>
67
68#include <net/bpf.h>
69#include <net/ethernet.h>
70#include <net/if.h>
71#include <net/if_var.h>
72#include <net/if_arp.h>
73#include <net/if_dl.h>
74#include <net/if_media.h>
75
76#include <net/if_types.h>
77#include <net/if_vlan_var.h>
78
79#include <netinet/in_systm.h>
80#include <netinet/in.h>
81#include <netinet/if_ether.h>
82#include <netinet/ip.h>
83#include <netinet/ip6.h>
84#include <netinet/tcp.h>
85#include <netinet/udp.h>
86
87#include <machine/bus.h>
88#include <machine/resource.h>
89#include <sys/bus.h>
90#include <sys/rman.h>
91
92#include <dev/mii/mii.h>
93#include <dev/mii/miivar.h>
94#include "miidevs.h"
95#include <dev/mii/brgphyreg.h>
96
97#include <dev/pci/pcireg.h>
98#include <dev/pci/pcivar.h>
99
100#include "miibus_if.h"
101
102#include <dev/bce/if_bcereg.h>
103#include <dev/bce/if_bcefw.h>
104
105/****************************************************************************/
106/* BCE Debug Options                                                        */
107/****************************************************************************/
108#ifdef BCE_DEBUG
109	u32 bce_debug = BCE_WARN;
110
111	/*          0 = Never              */
112	/*          1 = 1 in 2,147,483,648 */
113	/*        256 = 1 in     8,388,608 */
114	/*       2048 = 1 in     1,048,576 */
115	/*      65536 = 1 in        32,768 */
116	/*    1048576 = 1 in         2,048 */
117	/*  268435456 = 1 in             8 */
118	/*  536870912 = 1 in             4 */
119	/* 1073741824 = 1 in             2 */
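	/* i.e. a failure is simulated with probability value / 2^31.     */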
120
121	/* Controls how often the l2_fhdr frame error check will fail. */
122	int l2fhdr_error_sim_control = 0;
123
124	/* Controls how often the unexpected attention check will fail. */
125	int unexpected_attention_sim_control = 0;
126
127	/* Controls how often to simulate an mbuf allocation failure. */
128	int mbuf_alloc_failed_sim_control = 0;
129
130	/* Controls how often to simulate a DMA mapping failure. */
131	int dma_map_addr_failed_sim_control = 0;
132
133	/* Controls how often to simulate a bootcode failure. */
134	int bootcode_running_failure_sim_control = 0;
135#endif
136
137/****************************************************************************/
138/* PCI Device ID Table                                                      */
139/*                                                                          */
140/* Used by bce_probe() to identify the devices supported by this driver.    */
141/****************************************************************************/
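/* Maximum length of the device description string built in bce_probe(). */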
142#define BCE_DEVDESC_MAX		64
143
144static const struct bce_type bce_devs[] = {
145	/* BCM5706C Controllers and OEM boards. */
146	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
147		"HP NC370T Multifunction Gigabit Server Adapter" },
148	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
149		"HP NC370i Multifunction Gigabit Server Adapter" },
150	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
151		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
152	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
153		"HP NC371i Multifunction Gigabit Server Adapter" },
154	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
155		"QLogic NetXtreme II BCM5706 1000Base-T" },
156
157	/* BCM5706S controllers and OEM boards. */
158	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
159		"HP NC370F Multifunction Gigabit Server Adapter" },
160	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
161		"QLogic NetXtreme II BCM5706 1000Base-SX" },
162
163	/* BCM5708C controllers and OEM boards. */
164	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
165		"HP NC373T PCIe Multifunction Gig Server Adapter" },
166	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
167		"HP NC373i Multifunction Gigabit Server Adapter" },
168	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
169		"HP NC374m PCIe Multifunction Adapter" },
170	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
171		"QLogic NetXtreme II BCM5708 1000Base-T" },
172
173	/* BCM5708S controllers and OEM boards. */
174	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
175		"HP NC373m Multifunction Gigabit Server Adapter" },
176	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
177		"HP NC373i Multifunction Gigabit Server Adapter" },
178	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
179		"HP NC373F PCIe Multifunc Giga Server Adapter" },
180	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
181		"QLogic NetXtreme II BCM5708 1000Base-SX" },
182
183	/* BCM5709C controllers and OEM boards. */
184	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
185		"HP NC382i DP Multifunction Gigabit Server Adapter" },
186	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
187		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
188	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
189		"QLogic NetXtreme II BCM5709 1000Base-T" },
190
191	/* BCM5709S controllers and OEM boards. */
192	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
193		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
194	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
195		"HP NC382i DP Multifunction Gigabit Server Adapter" },
196	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
197		"QLogic NetXtreme II BCM5709 1000Base-SX" },
198
199	/* BCM5716 controllers and OEM boards. */
200	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,  PCI_ANY_ID,  PCI_ANY_ID,
201		"QLogic NetXtreme II BCM5716 1000Base-T" },
202	{ 0, 0, 0, 0, NULL }
203};
204
205/****************************************************************************/
206/* Supported Flash NVRAM device data.                                       */
207/****************************************************************************/
208static const struct flash_spec flash_table[] =
209{
210#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
211#define NONBUFFERED_FLAGS	(BCE_NV_WREN)
212
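	/*
	 * The first five words of each entry are raw controller register
	 * values (the "strap", "cfg1" and "write1" settings referred to in
	 * the comments below); the remaining fields describe the flash
	 * geometry and provide a name string (see struct flash_spec).
	 */
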
213	/* Slow EEPROM */
214	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
215	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
216	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
217	 "EEPROM - slow"},
218	/* Expansion entry 0001 */
219	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
220	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
221	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
222	 "Entry 0001"},
223	/* Saifun SA25F010 (non-buffered flash) */
224	/* strap, cfg1, & write1 need updates */
225	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
226	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
227	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
228	 "Non-buffered flash (128kB)"},
229	/* Saifun SA25F020 (non-buffered flash) */
230	/* strap, cfg1, & write1 need updates */
231	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
232	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
233	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
234	 "Non-buffered flash (256kB)"},
235	/* Expansion entry 0100 */
236	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
237	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
238	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
239	 "Entry 0100"},
240	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
241	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
242	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
243	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
244	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
245	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
246	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
247	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
248	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
249	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
250	/* Saifun SA25F005 (non-buffered flash) */
251	/* strap, cfg1, & write1 need updates */
252	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
253	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
254	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
255	 "Non-buffered flash (64kB)"},
256	/* Fast EEPROM */
257	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
258	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
259	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
260	 "EEPROM - fast"},
261	/* Expansion entry 1001 */
262	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
263	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
264	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
265	 "Entry 1001"},
266	/* Expansion entry 1010 */
267	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
268	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
269	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
270	 "Entry 1010"},
271	/* ATMEL AT45DB011B (buffered flash) */
272	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
273	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
274	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
275	 "Buffered flash (128kB)"},
276	/* Expansion entry 1100 */
277	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
278	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
279	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
280	 "Entry 1100"},
281	/* Expansion entry 1101 */
282	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
283	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
284	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
285	 "Entry 1101"},
286	/* Atmel Expansion entry 1110 */
287	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
288	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
289	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
290	 "Entry 1110 (Atmel)"},
291	/* ATMEL AT45DB021B (buffered flash) */
292	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
293	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
294	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
295	 "Buffered flash (256kB)"},
296};
297
298/*
299 * The BCM5709 controllers transparently handle the
300 * differences between Atmel 264 byte pages and all
301 * flash devices which use 256 byte pages, so no
302 * logical-to-physical mapping is required in the
303 * driver.
304 */
305static const struct flash_spec flash_5709 = {
306	.flags		= BCE_NV_BUFFERED,
307	.page_bits	= BCM5709_FLASH_PAGE_BITS,
308	.page_size	= BCM5709_FLASH_PAGE_SIZE,
309	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
310	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
311	.name		= "5709/5716 buffered flash (256kB)",
312};
313
314/****************************************************************************/
315/* FreeBSD device entry points.                                             */
316/****************************************************************************/
317static int  bce_probe			(device_t);
318static int  bce_attach			(device_t);
319static int  bce_detach			(device_t);
320static int  bce_shutdown		(device_t);
321
322/****************************************************************************/
323/* BCE Debug Data Structure Dump Routines                                   */
324/****************************************************************************/
325#ifdef BCE_DEBUG
326static u32  bce_reg_rd				(struct bce_softc *, u32);
327static void bce_reg_wr				(struct bce_softc *, u32, u32);
328static void bce_reg_wr16			(struct bce_softc *, u32, u16);
329static u32  bce_ctx_rd				(struct bce_softc *, u32, u32);
330static void bce_dump_enet			(struct bce_softc *, struct mbuf *);
331static void bce_dump_mbuf			(struct bce_softc *, struct mbuf *);
332static void bce_dump_tx_mbuf_chain	(struct bce_softc *, u16, int);
333static void bce_dump_rx_mbuf_chain	(struct bce_softc *, u16, int);
334static void bce_dump_pg_mbuf_chain	(struct bce_softc *, u16, int);
335static void bce_dump_txbd			(struct bce_softc *,
336    int, struct tx_bd *);
337static void bce_dump_rxbd			(struct bce_softc *,
338    int, struct rx_bd *);
339static void bce_dump_pgbd			(struct bce_softc *,
340    int, struct rx_bd *);
341static void bce_dump_l2fhdr		(struct bce_softc *,
342    int, struct l2_fhdr *);
343static void bce_dump_ctx			(struct bce_softc *, u16);
344static void bce_dump_ftqs			(struct bce_softc *);
345static void bce_dump_tx_chain		(struct bce_softc *, u16, int);
346static void bce_dump_rx_bd_chain	(struct bce_softc *, u16, int);
347static void bce_dump_pg_chain		(struct bce_softc *, u16, int);
348static void bce_dump_status_block	(struct bce_softc *);
349static void bce_dump_stats_block	(struct bce_softc *);
350static void bce_dump_driver_state	(struct bce_softc *);
351static void bce_dump_hw_state		(struct bce_softc *);
352static void bce_dump_shmem_state	(struct bce_softc *);
353static void bce_dump_mq_regs		(struct bce_softc *);
354static void bce_dump_bc_state		(struct bce_softc *);
355static void bce_dump_txp_state		(struct bce_softc *, int);
356static void bce_dump_rxp_state		(struct bce_softc *, int);
357static void bce_dump_tpat_state	(struct bce_softc *, int);
358static void bce_dump_cp_state		(struct bce_softc *, int);
359static void bce_dump_com_state		(struct bce_softc *, int);
360static void bce_dump_rv2p_state	(struct bce_softc *);
361static void bce_breakpoint			(struct bce_softc *);
362#endif /*BCE_DEBUG */
363
364/****************************************************************************/
365/* BCE Register/Memory Access Routines                                      */
366/****************************************************************************/
367static u32  bce_reg_rd_ind		(struct bce_softc *, u32);
368static void bce_reg_wr_ind		(struct bce_softc *, u32, u32);
369static void bce_shmem_wr		(struct bce_softc *, u32, u32);
370static u32  bce_shmem_rd		(struct bce_softc *, u32);
371static void bce_ctx_wr			(struct bce_softc *, u32, u32, u32);
372static int  bce_miibus_read_reg		(device_t, int, int);
373static int  bce_miibus_write_reg	(device_t, int, int, int);
374static void bce_miibus_statchg		(device_t);
375
376#ifdef BCE_DEBUG
377static int bce_sysctl_nvram_dump(SYSCTL_HANDLER_ARGS);
378#ifdef BCE_NVRAM_WRITE_SUPPORT
379static int bce_sysctl_nvram_write(SYSCTL_HANDLER_ARGS);
380#endif
381#endif
382
383/****************************************************************************/
384/* BCE NVRAM Access Routines                                                */
385/****************************************************************************/
386static int  bce_acquire_nvram_lock	(struct bce_softc *);
387static int  bce_release_nvram_lock	(struct bce_softc *);
388static void bce_enable_nvram_access(struct bce_softc *);
389static void bce_disable_nvram_access(struct bce_softc *);
390static int  bce_nvram_read_dword	(struct bce_softc *, u32, u8 *, u32);
391static int  bce_init_nvram			(struct bce_softc *);
392static int  bce_nvram_read			(struct bce_softc *, u32, u8 *, int);
393static int  bce_nvram_test			(struct bce_softc *);
394#ifdef BCE_NVRAM_WRITE_SUPPORT
395static int  bce_enable_nvram_write	(struct bce_softc *);
396static void bce_disable_nvram_write(struct bce_softc *);
397static int  bce_nvram_erase_page	(struct bce_softc *, u32);
398static int  bce_nvram_write_dword	(struct bce_softc *, u32, u8 *, u32);
399static int  bce_nvram_write		(struct bce_softc *, u32, u8 *, int);
400#endif
401
402/****************************************************************************/
403/* BCE Media, DMA Allocation and Resource Management Routines               */
404/****************************************************************************/
405static void bce_get_rx_buffer_sizes(struct bce_softc *, int);
406static void bce_get_media			(struct bce_softc *);
407static void bce_init_media			(struct bce_softc *);
408static u32 bce_get_rphy_link		(struct bce_softc *);
409static void bce_dma_map_addr		(void *, bus_dma_segment_t *, int, int);
410static int  bce_dma_alloc			(device_t);
411static void bce_dma_free			(struct bce_softc *);
412static void bce_release_resources	(struct bce_softc *);
413
414/****************************************************************************/
415/* BCE Firmware Synchronization and Load                                    */
416/****************************************************************************/
417static void bce_fw_cap_init			(struct bce_softc *);
418static int  bce_fw_sync			(struct bce_softc *, u32);
419static void bce_load_rv2p_fw		(struct bce_softc *, const u32 *, u32,
420    u32);
421static void bce_load_cpu_fw		(struct bce_softc *,
422    struct cpu_reg *, struct fw_info *);
423static void bce_start_cpu			(struct bce_softc *, struct cpu_reg *);
424static void bce_halt_cpu			(struct bce_softc *, struct cpu_reg *);
425static void bce_start_rxp_cpu		(struct bce_softc *);
426static void bce_init_rxp_cpu		(struct bce_softc *);
427static void bce_init_txp_cpu 		(struct bce_softc *);
428static void bce_init_tpat_cpu		(struct bce_softc *);
429static void bce_init_cp_cpu	  	(struct bce_softc *);
430static void bce_init_com_cpu	  	(struct bce_softc *);
431static void bce_init_cpus			(struct bce_softc *);
432
433static void bce_print_adapter_info	(struct bce_softc *);
434static void bce_probe_pci_caps		(device_t, struct bce_softc *);
435static void bce_stop				(struct bce_softc *);
436static int  bce_reset				(struct bce_softc *, u32);
437static int  bce_chipinit 			(struct bce_softc *);
438static int  bce_blockinit 			(struct bce_softc *);
439
440static int  bce_init_tx_chain		(struct bce_softc *);
441static void bce_free_tx_chain		(struct bce_softc *);
442
443static int  bce_get_rx_buf		(struct bce_softc *, u16, u16, u32 *);
444static int  bce_init_rx_chain		(struct bce_softc *);
445static void bce_fill_rx_chain		(struct bce_softc *);
446static void bce_free_rx_chain		(struct bce_softc *);
447
448static int  bce_get_pg_buf		(struct bce_softc *, u16, u16);
449static int  bce_init_pg_chain		(struct bce_softc *);
450static void bce_fill_pg_chain		(struct bce_softc *);
451static void bce_free_pg_chain		(struct bce_softc *);
452
453static struct mbuf *bce_tso_setup	(struct bce_softc *,
454    struct mbuf **, u16 *);
455static int  bce_tx_encap			(struct bce_softc *, struct mbuf **);
456static void bce_start_locked		(struct ifnet *);
457static void bce_start			(struct ifnet *);
458static int  bce_ioctl			(struct ifnet *, u_long, caddr_t);
459static uint64_t bce_get_counter		(struct ifnet *, ift_counter);
460static void bce_watchdog		(struct bce_softc *);
461static int  bce_ifmedia_upd		(struct ifnet *);
462static int  bce_ifmedia_upd_locked	(struct ifnet *);
463static void bce_ifmedia_sts		(struct ifnet *, struct ifmediareq *);
464static void bce_ifmedia_sts_rphy	(struct bce_softc *, struct ifmediareq *);
465static void bce_init_locked		(struct bce_softc *);
466static void bce_init				(void *);
467static void bce_mgmt_init_locked	(struct bce_softc *sc);
468
469static int  bce_init_ctx			(struct bce_softc *);
470static void bce_get_mac_addr		(struct bce_softc *);
471static void bce_set_mac_addr		(struct bce_softc *);
472static void bce_phy_intr			(struct bce_softc *);
473static inline u16 bce_get_hw_rx_cons	(struct bce_softc *);
474static void bce_rx_intr			(struct bce_softc *);
475static void bce_tx_intr			(struct bce_softc *);
476static void bce_disable_intr		(struct bce_softc *);
477static void bce_enable_intr		(struct bce_softc *, int);
478
479static void bce_intr				(void *);
480static void bce_set_rx_mode		(struct bce_softc *);
481static void bce_stats_update		(struct bce_softc *);
482static void bce_tick				(void *);
483static void bce_pulse				(void *);
484static void bce_add_sysctls		(struct bce_softc *);
485
486/****************************************************************************/
487/* FreeBSD device dispatch table.                                           */
488/****************************************************************************/
489static device_method_t bce_methods[] = {
490	/* Device interface (device_if.h) */
491	DEVMETHOD(device_probe,		bce_probe),
492	DEVMETHOD(device_attach,	bce_attach),
493	DEVMETHOD(device_detach,	bce_detach),
494	DEVMETHOD(device_shutdown,	bce_shutdown),
495/* Supported by device interface but not used here. */
496/*	DEVMETHOD(device_identify,	bce_identify),      */
497/*	DEVMETHOD(device_suspend,	bce_suspend),       */
498/*	DEVMETHOD(device_resume,	bce_resume),        */
499/*	DEVMETHOD(device_quiesce,	bce_quiesce),       */
500
501	/* MII interface (miibus_if.h) */
502	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
503	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
504	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
505/* Supported by MII interface but not used here.       */
506/*	DEVMETHOD(miibus_linkchg,	bce_miibus_linkchg),   */
507/*	DEVMETHOD(miibus_mediainit,	bce_miibus_mediainit), */
508
509	DEVMETHOD_END
510};
511
512static driver_t bce_driver = {
513	"bce",
514	bce_methods,
515	sizeof(struct bce_softc)
516};
517
518static devclass_t bce_devclass;
519
520MODULE_DEPEND(bce, pci, 1, 1, 1);
521MODULE_DEPEND(bce, ether, 1, 1, 1);
522MODULE_DEPEND(bce, miibus, 1, 1, 1);
523
524DRIVER_MODULE(bce, pci, bce_driver, bce_devclass, NULL, NULL);
525DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
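/*
 * Export PNP ID information from the table above so that devmatch(8) can
 * automatically load this module when a matching device is found; the
 * terminating all-zero entry is excluded from the count.
 */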
526MODULE_PNP_INFO("U16:vendor;U16:device;U16:#;U16:#;D:#", pci, bce,
527    bce_devs, nitems(bce_devs) - 1);
528
529/****************************************************************************/
530/* Tunable device values                                                    */
531/****************************************************************************/
532static SYSCTL_NODE(_hw, OID_AUTO, bce, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
533    "bce driver parameters");
534
535/* Allowable values are TRUE or FALSE */
536static int bce_verbose = TRUE;
537SYSCTL_INT(_hw_bce, OID_AUTO, verbose, CTLFLAG_RDTUN, &bce_verbose, 0,
538    "Verbose output enable/disable");
539
540/* Allowable values are TRUE or FALSE */
541static int bce_tso_enable = TRUE;
542SYSCTL_INT(_hw_bce, OID_AUTO, tso_enable, CTLFLAG_RDTUN, &bce_tso_enable, 0,
543    "TSO Enable/Disable");
544
545/* Allowable values are 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
546/* ToDo: Add MSI-X support. */
547static int bce_msi_enable = 1;
548SYSCTL_INT(_hw_bce, OID_AUTO, msi_enable, CTLFLAG_RDTUN, &bce_msi_enable, 0,
549    "MSI-X|MSI|INTx selector");
550
551/* Allowable values are 1, 2, 4, 8. */
552static int bce_rx_pages = DEFAULT_RX_PAGES;
553SYSCTL_UINT(_hw_bce, OID_AUTO, rx_pages, CTLFLAG_RDTUN, &bce_rx_pages, 0,
554    "Receive buffer descriptor pages (1 page = 255 buffer descriptors)");
555
556/* Allowable values are 1, 2, 4, 8. */
557static int bce_tx_pages = DEFAULT_TX_PAGES;
558SYSCTL_UINT(_hw_bce, OID_AUTO, tx_pages, CTLFLAG_RDTUN, &bce_tx_pages, 0,
559    "Transmit buffer descriptor pages (1 page = 255 buffer descriptors)");
560
561/* Allowable values are TRUE or FALSE. */
562static int bce_hdr_split = TRUE;
563SYSCTL_UINT(_hw_bce, OID_AUTO, hdr_split, CTLFLAG_RDTUN, &bce_hdr_split, 0,
564    "Frame header/payload splitting Enable/Disable");
565
566/* Allowable values are TRUE or FALSE. */
567static int bce_strict_rx_mtu = FALSE;
568SYSCTL_UINT(_hw_bce, OID_AUTO, strict_rx_mtu, CTLFLAG_RDTUN,
569    &bce_strict_rx_mtu, 0,
570    "Enable/Disable strict RX frame size checking");
571
572/* Allowable values are 0 ... 100 */
573#ifdef BCE_DEBUG
574/* Generate 1 interrupt for every transmit completion. */
575static int bce_tx_quick_cons_trip_int = 1;
576#else
577/* Generate 1 interrupt for every 20 transmit completions. */
578static int bce_tx_quick_cons_trip_int = DEFAULT_TX_QUICK_CONS_TRIP_INT;
579#endif
580SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip_int, CTLFLAG_RDTUN,
581    &bce_tx_quick_cons_trip_int, 0,
582    "Transmit BD trip point during interrupts");
583
584/* Allowable values are 0 ... 100 */
585#ifdef BCE_DEBUG
586/* Generate 1 interrupt for every transmit completion. */
587static int bce_tx_quick_cons_trip = 1;
588#else
589/* Generate 1 interrupt for every 20 transmit completions. */
590static int bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
591#endif
592SYSCTL_UINT(_hw_bce, OID_AUTO, tx_quick_cons_trip, CTLFLAG_RDTUN,
593    &bce_tx_quick_cons_trip, 0,
594    "Transmit BD trip point");
595
596/* Allowable values are 0 ... 100 */
597#ifdef BCE_DEBUG
598/* Generate an interrupt if 0us have elapsed since the last TX completion. */
599static int bce_tx_ticks_int = 0;
600#else
601/* Generate an interrupt if 80us have elapsed since the last TX completion. */
602static int bce_tx_ticks_int = DEFAULT_TX_TICKS_INT;
603#endif
604SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks_int, CTLFLAG_RDTUN,
605    &bce_tx_ticks_int, 0, "Transmit ticks count during interrupt");
606
607/* Allowable values are 0 ... 100 */
608#ifdef BCE_DEBUG
609/* Generate an interrupt if 0us have elapsed since the last TX completion. */
610static int bce_tx_ticks = 0;
611#else
612/* Generate an interrupt if 80us have elapsed since the last TX completion. */
613static int bce_tx_ticks = DEFAULT_TX_TICKS;
614#endif
615SYSCTL_UINT(_hw_bce, OID_AUTO, tx_ticks, CTLFLAG_RDTUN,
616    &bce_tx_ticks, 0, "Transmit ticks count");
617
618/* Allowable values are 1 ... 100 */
619#ifdef BCE_DEBUG
620/* Generate 1 interrupt for every received frame. */
621static int bce_rx_quick_cons_trip_int = 1;
622#else
623/* Generate 1 interrupt for every 6 received frames. */
624static int bce_rx_quick_cons_trip_int = DEFAULT_RX_QUICK_CONS_TRIP_INT;
625#endif
626SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip_int, CTLFLAG_RDTUN,
627    &bce_rx_quick_cons_trip_int, 0,
628    "Receive BD trip point duirng interrupts");
629
630/* Allowable values are 1 ... 100 */
631#ifdef BCE_DEBUG
632/* Generate 1 interrupt for every received frame. */
633static int bce_rx_quick_cons_trip = 1;
634#else
635/* Generate 1 interrupt for every 6 received frames. */
636static int bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
637#endif
638SYSCTL_UINT(_hw_bce, OID_AUTO, rx_quick_cons_trip, CTLFLAG_RDTUN,
639    &bce_rx_quick_cons_trip, 0,
640    "Receive BD trip point");
641
642/* Allowable values are 0 ... 100 */
643#ifdef BCE_DEBUG
644/* Generate an int. if 0us have elapsed since the last received frame. */
645static int bce_rx_ticks_int = 0;
646#else
647/* Generate an int. if 18us have elapsed since the last received frame. */
648static int bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
649#endif
650SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks_int, CTLFLAG_RDTUN,
651    &bce_rx_ticks_int, 0, "Receive ticks count during interrupt");
652
653/* Allowable values are 0 ... 100 */
654#ifdef BCE_DEBUG
655/* Generate an int. if 0us have elapsed since the last received frame. */
656static int bce_rx_ticks = 0;
657#else
658/* Generate an int. if 18us have elapsed since the last received frame. */
659static int bce_rx_ticks = DEFAULT_RX_TICKS;
660#endif
661SYSCTL_UINT(_hw_bce, OID_AUTO, rx_ticks, CTLFLAG_RDTUN,
662    &bce_rx_ticks, 0, "Receive ticks count");
663
664/****************************************************************************/
665/* Device probe function.                                                   */
666/*                                                                          */
667/* Compares the device to the driver's list of supported devices and        */
668/* reports back to the OS whether this is the right driver for the device.  */
669/*                                                                          */
670/* Returns:                                                                 */
671/*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
672/****************************************************************************/
673static int
674bce_probe(device_t dev)
675{
676	const struct bce_type *t;
677	struct bce_softc *sc;
678	char *descbuf;
679	u16 vid = 0, did = 0, svid = 0, sdid = 0;
680
681	t = bce_devs;
682
683	sc = device_get_softc(dev);
684	sc->bce_unit = device_get_unit(dev);
685	sc->bce_dev = dev;
686
687	/* Get the data for the device to be probed. */
688	vid  = pci_get_vendor(dev);
689	did  = pci_get_device(dev);
690	svid = pci_get_subvendor(dev);
691	sdid = pci_get_subdevice(dev);
692
693	DBPRINT(sc, BCE_EXTREME_LOAD,
694	    "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
695	    "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
696
697	/* Look through the list of known devices for a match. */
698	while(t->bce_name != NULL) {
699		if ((vid == t->bce_vid) && (did == t->bce_did) &&
700		    ((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
701		    ((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
702			descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
703
704			if (descbuf == NULL)
705				return(ENOMEM);
706
707			/* Print out the device identity. */
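			/*
			 * The upper nibble of the PCI revision ID selects the
			 * revision letter ('A' + nibble) and the lower nibble
			 * the revision number, so e.g. 0x11 reads as "B1".
			 */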
708			snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
709			    t->bce_name, (((pci_read_config(dev,
710			    PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
711			    (pci_read_config(dev, PCIR_REVID, 4) & 0xf));
712
713			device_set_desc_copy(dev, descbuf);
714			free(descbuf, M_TEMP);
715			return(BUS_PROBE_DEFAULT);
716		}
717		t++;
718	}
719
720	return(ENXIO);
721}
722
723/****************************************************************************/
724/* Adapter Information Print Routine.                                       */
725/*                                                                          */
726/* Prints useful information about the adapter to the system console, such  */
727/* as the ASIC revision, bus type/speed, firmware version and features.     */
728/*                                                                          */
729/* Returns:                                                                 */
730/*   None.                                                                  */
731/****************************************************************************/
732static void
733bce_print_adapter_info(struct bce_softc *sc)
734{
735	int i = 0;
736
737	DBENTER(BCE_VERBOSE_LOAD);
738
739	if (bce_verbose || bootverbose) {
740		BCE_PRINTF("ASIC (0x%08X); ", sc->bce_chipid);
741		printf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >>
742		    12) + 'A', ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
743
744		/* Bus info. */
745		if (sc->bce_flags & BCE_PCIE_FLAG) {
746			printf("Bus (PCIe x%d, ", sc->link_width);
747			switch (sc->link_speed) {
748			case 1: printf("2.5Gbps); "); break;
749			case 2:	printf("5Gbps); "); break;
750			default: printf("Unknown link speed); ");
751			}
752		} else {
753			printf("Bus (PCI%s, %s, %dMHz); ",
754			    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
755			    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
756			    "32-bit" : "64-bit"), sc->bus_speed_mhz);
757		}
758
759		/* Firmware version and device features. */
760		printf("B/C (%s); Bufs (RX:%d;TX:%d;PG:%d); Flags (",
761		    sc->bce_bc_ver,	sc->rx_pages, sc->tx_pages,
762		    (bce_hdr_split == TRUE ? sc->pg_pages: 0));
763
764		if (bce_hdr_split == TRUE) {
765			printf("SPLT");
766			i++;
767		}
768
769		if (sc->bce_flags & BCE_USING_MSI_FLAG) {
770			if (i > 0) printf("|");
771			printf("MSI"); i++;
772		}
773
774		if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
775			if (i > 0) printf("|");
776			printf("MSI-X"); i++;
777		}
778
779		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
780			if (i > 0) printf("|");
781			printf("2.5G"); i++;
782		}
783
784		if (sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) {
785			if (i > 0) printf("|");
786			printf("Remote PHY(%s)",
787			    sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG ?
788			    "FIBER" : "TP"); i++;
789		}
790
791		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
792			if (i > 0) printf("|");
793			printf("MFW); MFW (%s)\n", sc->bce_mfw_ver);
794		} else {
795			printf(")\n");
796		}
797
798		printf("Coal (RX:%d,%d,%d,%d; TX:%d,%d,%d,%d)\n",
799		    sc->bce_rx_quick_cons_trip_int,
800		    sc->bce_rx_quick_cons_trip,
801		    sc->bce_rx_ticks_int,
802		    sc->bce_rx_ticks,
803		    sc->bce_tx_quick_cons_trip_int,
804		    sc->bce_tx_quick_cons_trip,
805		    sc->bce_tx_ticks_int,
806		    sc->bce_tx_ticks);
807	}
808
809	DBEXIT(BCE_VERBOSE_LOAD);
810}
811
812/****************************************************************************/
813/* PCI Capabilities Probe Function.                                         */
814/*                                                                          */
815/* Walks the PCI capabilities list for the device to find what features are */
816/* supported.                                                               */
817/*                                                                          */
818/* Returns:                                                                 */
819/*   None.                                                                  */
820/****************************************************************************/
821static void
822bce_probe_pci_caps(device_t dev, struct bce_softc *sc)
823{
824	u32 reg;
825
826	DBENTER(BCE_VERBOSE_LOAD);
827
828	/* Check if PCI-X capability is enabled. */
829	if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0) {
830		if (reg != 0)
831			sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
832	}
833
834	/* Check if PCIe capability is enabled. */
835	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
836		if (reg != 0) {
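			/* Offset 0x12 is the PCIe Link Status register. */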
837			u16 link_status = pci_read_config(dev, reg + 0x12, 2);
838			DBPRINT(sc, BCE_INFO_LOAD, "PCIe link_status = "
839			    "0x%08X\n",	link_status);
840			sc->link_speed = link_status & 0xf;
841			sc->link_width = (link_status >> 4) & 0x3f;
842			sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
843			sc->bce_flags |= BCE_PCIE_FLAG;
844		}
845	}
846
847	/* Check if MSI capability is enabled. */
848	if (pci_find_cap(dev, PCIY_MSI, &reg) == 0) {
849		if (reg != 0)
850			sc->bce_cap_flags |= BCE_MSI_CAPABLE_FLAG;
851	}
852
853	/* Check if MSI-X capability is enabled. */
854	if (pci_find_cap(dev, PCIY_MSIX, &reg) == 0) {
855		if (reg != 0)
856			sc->bce_cap_flags |= BCE_MSIX_CAPABLE_FLAG;
857	}
858
859	DBEXIT(BCE_VERBOSE_LOAD);
860}
861
862/****************************************************************************/
863/* Load and validate user tunable settings.                                 */
864/*                                                                          */
865/* Returns:                                                                 */
866/*   Nothing.                                                               */
867/****************************************************************************/
868static void
869bce_set_tunables(struct bce_softc *sc)
870{
871	/* Set sysctl values for RX page count. */
872	switch (bce_rx_pages) {
873	case 1:
874		/* fall-through */
875	case 2:
876		/* fall-through */
877	case 4:
878		/* fall-through */
879	case 8:
880		sc->rx_pages = bce_rx_pages;
881		break;
882	default:
883		sc->rx_pages = DEFAULT_RX_PAGES;
884		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
885		    "hw.bce.rx_pages!  Setting default of %d.\n",
886		    __FILE__, __LINE__, bce_rx_pages, DEFAULT_RX_PAGES);
887	}
888
889	/* ToDo: Consider allowing user setting for pg_pages. */
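	/* Use four page-chain pages per RX page, capped at MAX_PG_PAGES. */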
890	sc->pg_pages = min((sc->rx_pages * 4), MAX_PG_PAGES);
891
892	/* Set sysctl values for TX page count. */
893	switch (bce_tx_pages) {
894	case 1:
895		/* fall-through */
896	case 2:
897		/* fall-through */
898	case 4:
899		/* fall-through */
900	case 8:
901		sc->tx_pages = bce_tx_pages;
902		break;
903	default:
904		sc->tx_pages = DEFAULT_TX_PAGES;
905		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
906		    "hw.bce.tx_pages!  Setting default of %d.\n",
907		    __FILE__, __LINE__, bce_tx_pages, DEFAULT_TX_PAGES);
908	}
909
910	/*
911	 * Validate the TX trip point (i.e. the number of
912	 * TX completions before a status block update is
913	 * generated and an interrupt is asserted).
914	 */
915	if (bce_tx_quick_cons_trip_int <= 100) {
916		sc->bce_tx_quick_cons_trip_int =
917		    bce_tx_quick_cons_trip_int;
918	} else {
919		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
920		    "hw.bce.tx_quick_cons_trip_int!  Setting default of %d.\n",
921		    __FILE__, __LINE__, bce_tx_quick_cons_trip_int,
922		    DEFAULT_TX_QUICK_CONS_TRIP_INT);
923		sc->bce_tx_quick_cons_trip_int =
924		    DEFAULT_TX_QUICK_CONS_TRIP_INT;
925	}
926
927	if (bce_tx_quick_cons_trip <= 100) {
928		sc->bce_tx_quick_cons_trip =
929		    bce_tx_quick_cons_trip;
930	} else {
931		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
932		    "hw.bce.tx_quick_cons_trip!  Setting default of %d.\n",
933		    __FILE__, __LINE__, bce_tx_quick_cons_trip,
934		    DEFAULT_TX_QUICK_CONS_TRIP);
935		sc->bce_tx_quick_cons_trip =
936		    DEFAULT_TX_QUICK_CONS_TRIP;
937	}
938
939	/*
940	 * Validate the TX ticks count (i.e. the maximum amount
941	 * of time to wait after the last TX completion has
942	 * occurred before a status block update is generated
943	 * and an interrupt is asserted).
944	 */
945	if (bce_tx_ticks_int <= 100) {
946		sc->bce_tx_ticks_int =
947		    bce_tx_ticks_int;
948	} else {
949		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
950		    "hw.bce.tx_ticks_int!  Setting default of %d.\n",
951		    __FILE__, __LINE__, bce_tx_ticks_int,
952		    DEFAULT_TX_TICKS_INT);
953		sc->bce_tx_ticks_int =
954		    DEFAULT_TX_TICKS_INT;
955	}
956
957	if (bce_tx_ticks <= 100) {
958		sc->bce_tx_ticks =
959		    bce_tx_ticks;
960	} else {
961		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
962		    "hw.bce.tx_ticks!  Setting default of %d.\n",
963		    __FILE__, __LINE__, bce_tx_ticks,
964		    DEFAULT_TX_TICKS);
965		sc->bce_tx_ticks =
966		    DEFAULT_TX_TICKS;
967	}
968
969	/*
970	 * Validate the RX trip point (i.e. the number of
971	 * RX frames received before a status block update is
972	 * generated and an interrupt is asserted).
973	 */
974	if (bce_rx_quick_cons_trip_int <= 100) {
975		sc->bce_rx_quick_cons_trip_int =
976		    bce_rx_quick_cons_trip_int;
977	} else {
978		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
979		    "hw.bce.rx_quick_cons_trip_int!  Setting default of %d.\n",
980		    __FILE__, __LINE__, bce_rx_quick_cons_trip_int,
981		    DEFAULT_RX_QUICK_CONS_TRIP_INT);
982		sc->bce_rx_quick_cons_trip_int =
983		    DEFAULT_RX_QUICK_CONS_TRIP_INT;
984	}
985
986	if (bce_rx_quick_cons_trip <= 100) {
987		sc->bce_rx_quick_cons_trip =
988		    bce_rx_quick_cons_trip;
989	} else {
990		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
991		    "hw.bce.rx_quick_cons_trip!  Setting default of %d.\n",
992		    __FILE__, __LINE__, bce_rx_quick_cons_trip,
993		    DEFAULT_RX_QUICK_CONS_TRIP);
994		sc->bce_rx_quick_cons_trip =
995		    DEFAULT_RX_QUICK_CONS_TRIP;
996	}
997
998	/*
999	 * Validate the RX ticks count (i.e. the maximum amount
1000	 * of time to wait after the last RX frame has been
1001	 * received before a status block update is generated
1002	 * and an interrupt is asserted).
1003	 */
1004	if (bce_rx_ticks_int <= 100) {
1005		sc->bce_rx_ticks_int = bce_rx_ticks_int;
1006	} else {
1007		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
1008		    "hw.bce.rx_ticks_int!  Setting default of %d.\n",
1009		    __FILE__, __LINE__, bce_rx_ticks_int,
1010		    DEFAULT_RX_TICKS_INT);
1011		sc->bce_rx_ticks_int = DEFAULT_RX_TICKS_INT;
1012	}
1013
1014	if (bce_rx_ticks <= 100) {
1015		sc->bce_rx_ticks = bce_rx_ticks;
1016	} else {
1017		BCE_PRINTF("%s(%d): Illegal value (%d) specified for "
1018		    "hw.bce.rx_ticks!  Setting default of %d.\n",
1019		    __FILE__, __LINE__, bce_rx_ticks,
1020		    DEFAULT_RX_TICKS);
1021		sc->bce_rx_ticks = DEFAULT_RX_TICKS;
1022	}
1023
1024	/* Disabling both RX ticks and RX trips will prevent interrupts. */
1025	if ((bce_rx_quick_cons_trip == 0) && (bce_rx_ticks == 0)) {
1026		BCE_PRINTF("%s(%d): Cannot set both hw.bce.rx_ticks and "
1027		    "hw.bce.rx_quick_cons_trip to 0. Setting default values.\n",
1028		   __FILE__, __LINE__);
1029		sc->bce_rx_ticks = DEFAULT_RX_TICKS;
1030		sc->bce_rx_quick_cons_trip = DEFAULT_RX_QUICK_CONS_TRIP;
1031	}
1032
1033	/* Disabling both TX ticks and TX trips will prevent interrupts. */
1034	if ((bce_tx_quick_cons_trip == 0) && (bce_tx_ticks == 0)) {
1035		BCE_PRINTF("%s(%d): Cannot set both hw.bce.tx_ticks and "
1036		    "hw.bce.tx_quick_cons_trip to 0. Setting default values.\n",
1037		   __FILE__, __LINE__);
1038		sc->bce_tx_ticks = DEFAULT_TX_TICKS;
1039		sc->bce_tx_quick_cons_trip = DEFAULT_TX_QUICK_CONS_TRIP;
1040	}
1041}
1042
1043/****************************************************************************/
1044/* Device attach function.                                                  */
1045/*                                                                          */
1046/* Allocates device resources, performs secondary chip identification,      */
1047/* resets and initializes the hardware, and initializes driver instance     */
1048/* variables.                                                               */
1049/*                                                                          */
1050/* Returns:                                                                 */
1051/*   0 on success, positive value on failure.                               */
1052/****************************************************************************/
1053static int
1054bce_attach(device_t dev)
1055{
1056	struct bce_softc *sc;
1057	struct ifnet *ifp;
1058	u32 val;
1059	int count, error, rc = 0, rid;
1060
1061	sc = device_get_softc(dev);
1062	sc->bce_dev = dev;
1063
1064	DBENTER(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1065
1066	sc->bce_unit = device_get_unit(dev);
1067
1068	/* Set initial device and PHY flags */
1069	sc->bce_flags = 0;
1070	sc->bce_phy_flags = 0;
1071
1072	bce_set_tunables(sc);
1073
1074	pci_enable_busmaster(dev);
1075
1076	/* Allocate PCI memory resources. */
1077	rid = PCIR_BAR(0);
1078	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1079		&rid, RF_ACTIVE);
1080
1081	if (sc->bce_res_mem == NULL) {
1082		BCE_PRINTF("%s(%d): PCI memory allocation failed\n",
1083		    __FILE__, __LINE__);
1084		rc = ENXIO;
1085		goto bce_attach_fail;
1086	}
1087
1088	/* Get various resource handles. */
1089	sc->bce_btag    = rman_get_bustag(sc->bce_res_mem);
1090	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
1091	sc->bce_vhandle = (vm_offset_t) rman_get_virtual(sc->bce_res_mem);
1092
1093	bce_probe_pci_caps(dev, sc);
1094
1095	rid = 1;
1096	count = 0;
1097#if 0
1098	/* Try allocating MSI-X interrupts. */
1099	if ((sc->bce_cap_flags & BCE_MSIX_CAPABLE_FLAG) &&
1100		(bce_msi_enable >= 2) &&
1101		((sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1102		&rid, RF_ACTIVE)) != NULL)) {
1103		msi_needed = count = 1;
1104
1105		if (((error = pci_alloc_msix(dev, &count)) != 0) ||
1106			(count != msi_needed)) {
1107			BCE_PRINTF("%s(%d): MSI-X allocation failed! Requested = %d,"
1108				"Received = %d, error = %d\n", __FILE__, __LINE__,
1109				msi_needed, count, error);
1110			count = 0;
1111			pci_release_msi(dev);
1112			bus_release_resource(dev, SYS_RES_MEMORY, rid,
1113				sc->bce_res_irq);
1114			sc->bce_res_irq = NULL;
1115		} else {
1116			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI-X interrupt.\n",
1117				__FUNCTION__);
1118			sc->bce_flags |= BCE_USING_MSIX_FLAG;
1119		}
1120	}
1121#endif
1122
1123	/* Try allocating a MSI interrupt. */
1124	if ((sc->bce_cap_flags & BCE_MSI_CAPABLE_FLAG) &&
1125		(bce_msi_enable >= 1) && (count == 0)) {
1126		count = 1;
1127		if ((error = pci_alloc_msi(dev, &count)) != 0) {
1128			BCE_PRINTF("%s(%d): MSI allocation failed! "
1129			    "error = %d\n", __FILE__, __LINE__, error);
1130			count = 0;
1131			pci_release_msi(dev);
1132		} else {
1133			DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using MSI "
1134			    "interrupt.\n", __FUNCTION__);
1135			sc->bce_flags |= BCE_USING_MSI_FLAG;
1136			if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
1137				sc->bce_flags |= BCE_ONE_SHOT_MSI_FLAG;
1138			rid = 1;
1139		}
1140	}
1141
1142	/* Try allocating a legacy interrupt. */
1143	if (count == 0) {
1144		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Using INTx interrupt.\n",
1145			__FUNCTION__);
1146		rid = 0;
1147	}
1148
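	/*
	 * MSI/MSI-X vectors are exclusive to this device; only a legacy
	 * INTx line (count == 0) may be shared with other devices.
	 */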
1149	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1150	    &rid, RF_ACTIVE | (count != 0 ? 0 : RF_SHAREABLE));
1151
1152	/* Report any IRQ allocation errors. */
1153	if (sc->bce_res_irq == NULL) {
1154		BCE_PRINTF("%s(%d): PCI map interrupt failed!\n",
1155		    __FILE__, __LINE__);
1156		rc = ENXIO;
1157		goto bce_attach_fail;
1158	}
1159
1160	/* Initialize mutex for the current device instance. */
1161	BCE_LOCK_INIT(sc, device_get_nameunit(dev));
1162
1163	/*
1164	 * Configure byte swap and enable indirect register access.
1165	 * Rely on CPU to do target byte swapping on big endian systems.
1166	 * Access to registers outside of PCI configuration space is not
1167	 * valid until this is done.
1168	 */
1169	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
1170	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
1171	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
1172
1173	/* Save ASIC revision info. */
1174	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
1175
1176	/* Weed out any non-production controller revisions. */
1177	switch(BCE_CHIP_ID(sc)) {
1178	case BCE_CHIP_ID_5706_A0:
1179	case BCE_CHIP_ID_5706_A1:
1180	case BCE_CHIP_ID_5708_A0:
1181	case BCE_CHIP_ID_5708_B0:
1182	case BCE_CHIP_ID_5709_A0:
1183	case BCE_CHIP_ID_5709_B0:
1184	case BCE_CHIP_ID_5709_B1:
1185	case BCE_CHIP_ID_5709_B2:
1186		BCE_PRINTF("%s(%d): Unsupported controller "
1187		    "revision (%c%d)!\n", __FILE__, __LINE__,
1188		    (((pci_read_config(dev, PCIR_REVID, 4) &
1189		    0xf0) >> 4) + 'A'), (pci_read_config(dev,
1190		    PCIR_REVID, 4) & 0xf));
1191		rc = ENODEV;
1192		goto bce_attach_fail;
1193	}
1194
1195	/*
1196	 * The embedded PCIe to PCI-X bridge (EPB)
1197	 * in the 5708 cannot address memory above
1198	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
1199	 */
1200	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
1201		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
1202	else
1203		sc->max_bus_addr = BUS_SPACE_MAXADDR;
1204
1205	/*
1206	 * Find the base address for shared memory access.
1207	 * Newer versions of bootcode use a signature and offset
1208	 * while older versions use a fixed address.
1209	 */
1210	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
1211	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
1212		/* Multi-port devices use different offsets in shared memory. */
1213		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0 +
1214		    (pci_get_function(sc->bce_dev) << 2));
1215	else
1216		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
1217
1218	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): bce_shmem_base = 0x%08X\n",
1219	    __FUNCTION__, sc->bce_shmem_base);
1220
1221	/* Fetch the bootcode revision. */
1222	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
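	/*
	 * Convert the upper three bytes of the version word into a
	 * dotted-decimal string (e.g. "1.9.6"), suppressing leading
	 * zeros within each field.
	 */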
1223	for (int i = 0, j = 0; i < 3; i++) {
1224		u8 num;
1225
1226		num = (u8) (val >> (24 - (i * 8)));
1227		for (int k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
1228			if (num >= k || !skip0 || k == 1) {
1229				sc->bce_bc_ver[j++] = (num / k) + '0';
1230				skip0 = 0;
1231			}
1232		}
1233
1234		if (i != 2)
1235			sc->bce_bc_ver[j++] = '.';
1236	}
1237
1238	/* Check if any management firmware is enabled. */
1239	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
1240	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
1241		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
1242
1243		/* Allow time for firmware to enter the running state. */
1244		for (int i = 0; i < 30; i++) {
1245			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
1246			if (val & BCE_CONDITION_MFW_RUN_MASK)
1247				break;
1248			DELAY(10000);
1249		}
1250
1251		/* Check if management firmware is running. */
1252		val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
1253		val &= BCE_CONDITION_MFW_RUN_MASK;
1254		if ((val != BCE_CONDITION_MFW_RUN_UNKNOWN) &&
1255		    (val != BCE_CONDITION_MFW_RUN_NONE)) {
1256			u32 addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
1257			int i = 0;
1258
1259			/* Read the management firmware version string. */
1260			for (int j = 0; j < 3; j++) {
1261				val = bce_reg_rd_ind(sc, addr + j * 4);
1262				val = bswap32(val);
1263				memcpy(&sc->bce_mfw_ver[i], &val, 4);
1264				i += 4;
1265			}
1266		} else {
1267			/* May cause firmware synchronization timeouts. */
1268			BCE_PRINTF("%s(%d): Management firmware enabled "
1269			    "but not running!\n", __FILE__, __LINE__);
1270			strcpy(sc->bce_mfw_ver, "NOT RUNNING!");
1271
1272			/* ToDo: Any action the driver should take? */
1273		}
1274	}
1275
1276	/* Get PCI bus information (speed and type). */
1277	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
1278	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
1279		u32 clkreg;
1280
1281		sc->bce_flags |= BCE_PCIX_FLAG;
1282
1283		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS);
1284
1285		clkreg &= BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
1286		switch (clkreg) {
1287		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
1288			sc->bus_speed_mhz = 133;
1289			break;
1290
1291		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
1292			sc->bus_speed_mhz = 100;
1293			break;
1294
1295		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
1296		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
1297			sc->bus_speed_mhz = 66;
1298			break;
1299
1300		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
1301		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
1302			sc->bus_speed_mhz = 50;
1303			break;
1304
1305		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
1306		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
1307		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
1308			sc->bus_speed_mhz = 33;
1309			break;
1310		}
1311	} else {
1312		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
1313			sc->bus_speed_mhz = 66;
1314		else
1315			sc->bus_speed_mhz = 33;
1316	}
1317
1318	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
1319		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
1320
1321	/* Find the media type for the adapter. */
1322	bce_get_media(sc);
1323
1324	/* Reset controller and announce to bootcode that driver is present. */
1325	if (bce_reset(sc, BCE_DRV_MSG_CODE_RESET)) {
1326		BCE_PRINTF("%s(%d): Controller reset failed!\n",
1327		    __FILE__, __LINE__);
1328		rc = ENXIO;
1329		goto bce_attach_fail;
1330	}
1331
1332	/* Initialize the controller. */
1333	if (bce_chipinit(sc)) {
1334		BCE_PRINTF("%s(%d): Controller initialization failed!\n",
1335		    __FILE__, __LINE__);
1336		rc = ENXIO;
1337		goto bce_attach_fail;
1338	}
1339
1340	/* Perform NVRAM test. */
1341	if (bce_nvram_test(sc)) {
1342		BCE_PRINTF("%s(%d): NVRAM test failed!\n",
1343		    __FILE__, __LINE__);
1344		rc = ENXIO;
1345		goto bce_attach_fail;
1346	}
1347
1348	/* Fetch the permanent Ethernet MAC address. */
1349	bce_get_mac_addr(sc);
1350
1351	/* Update statistics once every second. */
1352	sc->bce_stats_ticks = 1000000 & 0xffff00;
1353
1354	/* Store data needed by the PHY driver for backplane applications. */
1355	sc->bce_shared_hw_cfg = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1356	sc->bce_port_hw_cfg   = bce_shmem_rd(sc, BCE_PORT_HW_CFG_CONFIG);
1357
1358	/* Allocate DMA memory resources. */
1359	if (bce_dma_alloc(dev)) {
1360		BCE_PRINTF("%s(%d): DMA resource allocation failed!\n",
1361		    __FILE__, __LINE__);
1362		rc = ENXIO;
1363		goto bce_attach_fail;
1364	}
1365
1366	/* Allocate an ifnet structure. */
1367	ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
1368	if (ifp == NULL) {
1369		BCE_PRINTF("%s(%d): Interface allocation failed!\n",
1370		    __FILE__, __LINE__);
1371		rc = ENXIO;
1372		goto bce_attach_fail;
1373	}
1374
1375	/* Initialize the ifnet interface. */
1376	ifp->if_softc	= sc;
1377	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1378	ifp->if_flags	= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1379	ifp->if_ioctl	= bce_ioctl;
1380	ifp->if_start	= bce_start;
1381	ifp->if_get_counter = bce_get_counter;
1382	ifp->if_init	= bce_init;
1383	ifp->if_mtu	= ETHERMTU;
1384
1385	if (bce_tso_enable) {
1386		ifp->if_hwassist = BCE_IF_HWASSIST | CSUM_TSO;
1387		ifp->if_capabilities = BCE_IF_CAPABILITIES | IFCAP_TSO4 |
1388		    IFCAP_VLAN_HWTSO;
1389	} else {
1390		ifp->if_hwassist = BCE_IF_HWASSIST;
1391		ifp->if_capabilities = BCE_IF_CAPABILITIES;
1392	}
1393
1394	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
1395		ifp->if_capabilities |= IFCAP_LINKSTATE;
1396
1397	ifp->if_capenable = ifp->if_capabilities;
1398
1399	/*
1400	 * Assume standard mbuf sizes for buffer allocation.
1401	 * This may change later if the MTU size is set to
1402	 * something other than 1500.
1403	 */
1404	bce_get_rx_buffer_sizes(sc,
1405	    (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN));
1406
1407	/* Recalculate our buffer allocation sizes. */
1408	ifp->if_snd.ifq_drv_maxlen = USABLE_TX_BD_ALLOC;
1409	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
1410	IFQ_SET_READY(&ifp->if_snd);
1411
1412	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
1413		ifp->if_baudrate = IF_Mbps(2500ULL);
1414	else
1415		ifp->if_baudrate = IF_Mbps(1000);
1416
1417	/* Handle any special PHY initialization for SerDes PHYs. */
1418	bce_init_media(sc);
1419
1420	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
1421		ifmedia_init(&sc->bce_ifmedia, IFM_IMASK, bce_ifmedia_upd,
1422		    bce_ifmedia_sts);
1423		/*
1424		 * We can't manually override the remote PHY's link state, and we
1425		 * assume the PHY port configuration (fiber or TP) does not change
1426		 * after device attach.  This may not be correct, though.
1427		 */
1428		if ((sc->bce_phy_flags & BCE_PHY_REMOTE_PORT_FIBER_FLAG) != 0) {
1429			if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG) {
1430				ifmedia_add(&sc->bce_ifmedia,
1431				    IFM_ETHER | IFM_2500_SX, 0, NULL);
1432				ifmedia_add(&sc->bce_ifmedia,
1433				    IFM_ETHER | IFM_2500_SX | IFM_FDX, 0, NULL);
1434			}
1435			ifmedia_add(&sc->bce_ifmedia,
1436			    IFM_ETHER | IFM_1000_SX, 0, NULL);
1437			ifmedia_add(&sc->bce_ifmedia,
1438			    IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL);
1439		} else {
1440			ifmedia_add(&sc->bce_ifmedia,
1441			    IFM_ETHER | IFM_10_T, 0, NULL);
1442			ifmedia_add(&sc->bce_ifmedia,
1443			    IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1444			ifmedia_add(&sc->bce_ifmedia,
1445			    IFM_ETHER | IFM_100_TX, 0, NULL);
1446			ifmedia_add(&sc->bce_ifmedia,
1447			    IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1448			ifmedia_add(&sc->bce_ifmedia,
1449			    IFM_ETHER | IFM_1000_T, 0, NULL);
1450			ifmedia_add(&sc->bce_ifmedia,
1451			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1452		}
1453		ifmedia_add(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
1454		ifmedia_set(&sc->bce_ifmedia, IFM_ETHER | IFM_AUTO);
1455		sc->bce_ifmedia.ifm_media = sc->bce_ifmedia.ifm_cur->ifm_media;
1456	} else {
		/* Attach the MII child bus and probe the PHY. */
1458		rc = mii_attach(dev, &sc->bce_miibus, ifp, bce_ifmedia_upd,
1459		    bce_ifmedia_sts, BMSR_DEFCAPMASK, sc->bce_phy_addr,
1460		    MII_OFFSET_ANY, MIIF_DOPAUSE);
1461		if (rc != 0) {
1462			BCE_PRINTF("%s(%d): attaching PHYs failed\n", __FILE__,
1463			    __LINE__);
1464			goto bce_attach_fail;
1465		}
1466	}
1467
1468	/* Attach to the Ethernet interface list. */
1469	ether_ifattach(ifp, sc->eaddr);
1470
1471	callout_init_mtx(&sc->bce_tick_callout, &sc->bce_mtx, 0);
1472	callout_init_mtx(&sc->bce_pulse_callout, &sc->bce_mtx, 0);
1473
	/* Hook up the IRQ last. */
1475	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_TYPE_NET | INTR_MPSAFE,
1476		NULL, bce_intr, sc, &sc->bce_intrhand);
1477
1478	if (rc) {
1479		BCE_PRINTF("%s(%d): Failed to setup IRQ!\n",
1480		    __FILE__, __LINE__);
1481		bce_detach(dev);
1482		goto bce_attach_exit;
1483	}
1484
1485	/*
1486	 * At this point we've acquired all the resources
1487	 * we need to run so there's no turning back, we're
1488	 * cleared for launch.
1489	 */
1490
1491	/* Print some important debugging info. */
1492	DBRUNMSG(BCE_INFO, bce_dump_driver_state(sc));
1493
1494	/* Add the supported sysctls to the kernel. */
1495	bce_add_sysctls(sc);
1496
1497	BCE_LOCK(sc);
1498
1499	/*
1500	 * The chip reset earlier notified the bootcode that
1501	 * a driver is present.  We now need to start our pulse
1502	 * routine so that the bootcode is reminded that we're
1503	 * still running.
1504	 */
1505	bce_pulse(sc);
1506
1507	bce_mgmt_init_locked(sc);
1508	BCE_UNLOCK(sc);
1509
1510	/* Finally, print some useful adapter info */
1511	bce_print_adapter_info(sc);
1512	DBPRINT(sc, BCE_FATAL, "%s(): sc = %p\n",
1513		__FUNCTION__, sc);
1514
1515	goto bce_attach_exit;
1516
1517bce_attach_fail:
1518	bce_release_resources(sc);
1519
1520bce_attach_exit:
1521
1522	DBEXIT(BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
1523
1524	return(rc);
1525}
1526
1527/****************************************************************************/
1528/* Device detach function.                                                  */
1529/*                                                                          */
1530/* Stops the controller, resets the controller, and releases resources.     */
1531/*                                                                          */
1532/* Returns:                                                                 */
1533/*   0 on success, positive value on failure.                               */
1534/****************************************************************************/
1535static int
1536bce_detach(device_t dev)
1537{
1538	struct bce_softc *sc = device_get_softc(dev);
1539	struct ifnet *ifp;
1540	u32 msg;
1541
1542	DBENTER(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1543
1544	ifp = sc->bce_ifp;
1545
1546	/* Stop and reset the controller. */
1547	BCE_LOCK(sc);
1548
1549	/* Stop the pulse so the bootcode can go to driver absent state. */
1550	callout_stop(&sc->bce_pulse_callout);
1551
1552	bce_stop(sc);
1553	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1554		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1555	else
1556		msg = BCE_DRV_MSG_CODE_UNLOAD;
1557	bce_reset(sc, msg);
1558
1559	BCE_UNLOCK(sc);
1560
1561	ether_ifdetach(ifp);
1562
	/* If we have a child device on the MII bus, remove it too. */
1564	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0)
1565		ifmedia_removeall(&sc->bce_ifmedia);
1566	else {
1567		bus_generic_detach(dev);
1568		device_delete_child(dev, sc->bce_miibus);
1569	}
1570
1571	/* Release all remaining resources. */
1572	bce_release_resources(sc);
1573
1574	DBEXIT(BCE_VERBOSE_UNLOAD | BCE_VERBOSE_RESET);
1575
1576	return(0);
1577}
1578
1579/****************************************************************************/
1580/* Device shutdown function.                                                */
1581/*                                                                          */
1582/* Stops and resets the controller.                                         */
1583/*                                                                          */
1584/* Returns:                                                                 */
1585/*   0 on success, positive value on failure.                               */
1586/****************************************************************************/
1587static int
1588bce_shutdown(device_t dev)
1589{
1590	struct bce_softc *sc = device_get_softc(dev);
1591	u32 msg;
1592
1593	DBENTER(BCE_VERBOSE);
1594
1595	BCE_LOCK(sc);
1596	bce_stop(sc);
1597	if (sc->bce_flags & BCE_NO_WOL_FLAG)
1598		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1599	else
1600		msg = BCE_DRV_MSG_CODE_UNLOAD;
1601	bce_reset(sc, msg);
1602	BCE_UNLOCK(sc);
1603
1604	DBEXIT(BCE_VERBOSE);
1605
1606	return (0);
1607}
1608
1609#ifdef BCE_DEBUG
1610/****************************************************************************/
1611/* Register read.                                                           */
1612/*                                                                          */
1613/* Returns:                                                                 */
1614/*   The value of the register.                                             */
1615/****************************************************************************/
1616static u32
1617bce_reg_rd(struct bce_softc *sc, u32 offset)
1618{
1619	u32 val = REG_RD(sc, offset);
1620	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1621		__FUNCTION__, offset, val);
1622	return val;
1623}
1624
1625/****************************************************************************/
1626/* Register write (16 bit).                                                 */
1627/*                                                                          */
1628/* Returns:                                                                 */
1629/*   Nothing.                                                               */
1630/****************************************************************************/
1631static void
1632bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
1633{
1634	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
1635		__FUNCTION__, offset, val);
1636	REG_WR16(sc, offset, val);
1637}
1638
1639/****************************************************************************/
1640/* Register write.                                                          */
1641/*                                                                          */
1642/* Returns:                                                                 */
1643/*   Nothing.                                                               */
1644/****************************************************************************/
1645static void
1646bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
1647{
1648	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1649		__FUNCTION__, offset, val);
1650	REG_WR(sc, offset, val);
1651}
1652#endif
1653
1654/****************************************************************************/
1655/* Indirect register read.                                                  */
1656/*                                                                          */
1657/* Reads NetXtreme II registers using an index/data register pair in PCI    */
1658/* configuration space.  Using this mechanism avoids issues with posted     */
1659/* reads but is much slower than memory-mapped I/O.                         */
1660/*                                                                          */
1661/* Returns:                                                                 */
1662/*   The value of the register.                                             */
1663/****************************************************************************/
1664static u32
1665bce_reg_rd_ind(struct bce_softc *sc, u32 offset)
1666{
1667	device_t dev;
1668	dev = sc->bce_dev;
1669
1670	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1671#ifdef BCE_DEBUG
1672	{
1673		u32 val;
1674		val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1675		DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1676			__FUNCTION__, offset, val);
1677		return val;
1678	}
1679#else
1680	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1681#endif
1682}
1683
1684/****************************************************************************/
1685/* Indirect register write.                                                 */
1686/*                                                                          */
1687/* Writes NetXtreme II registers using an index/data register pair in PCI   */
1688/* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1690/*                                                                          */
1691/* Returns:                                                                 */
1692/*   Nothing.                                                               */
1693/****************************************************************************/
1694static void
1695bce_reg_wr_ind(struct bce_softc *sc, u32 offset, u32 val)
1696{
1697	device_t dev;
1698	dev = sc->bce_dev;
1699
1700	DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
1701		__FUNCTION__, offset, val);
1702
1703	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1704	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1705}
1706
1707/****************************************************************************/
1708/* Shared memory write.                                                     */
1709/*                                                                          */
1710/* Writes NetXtreme II shared memory region.                                */
1711/*                                                                          */
1712/* Returns:                                                                 */
1713/*   Nothing.                                                               */
1714/****************************************************************************/
1715static void
1716bce_shmem_wr(struct bce_softc *sc, u32 offset, u32 val)
1717{
	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Writing 0x%08X to "
	    "0x%08X\n", __FUNCTION__, val, offset);
1720
1721	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1722}
1723
1724/****************************************************************************/
1725/* Shared memory read.                                                      */
1726/*                                                                          */
1727/* Reads NetXtreme II shared memory region.                                 */
1728/*                                                                          */
1729/* Returns:                                                                 */
1730/*   The 32 bit value read.                                                 */
1731/****************************************************************************/
1732static u32
1733bce_shmem_rd(struct bce_softc *sc, u32 offset)
1734{
1735	u32 val = bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1736
	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "%s(): Reading 0x%08X from "
	    "0x%08X\n", __FUNCTION__, val, offset);
1739
1740	return val;
1741}
1742
1743#ifdef BCE_DEBUG
1744/****************************************************************************/
1745/* Context memory read.                                                     */
1746/*                                                                          */
1747/* The NetXtreme II controller uses context memory to track connection      */
1748/* information for L2 and higher network protocols.                         */
1749/*                                                                          */
1750/* Returns:                                                                 */
1751/*   The requested 32 bit value of context memory.                          */
1752/****************************************************************************/
1753static u32
1754bce_ctx_rd(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset)
1755{
1756	u32 idx, offset, retry_cnt = 5, val;
1757
1758	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 ||
1759	    cid_addr & CTX_MASK), BCE_PRINTF("%s(): Invalid CID "
1760	    "address: 0x%08X.\n", __FUNCTION__, cid_addr));
1761
1762	offset = ctx_offset + cid_addr;
1763
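	/*
	 * The 5709 reads context memory through a read-request/completion
	 * interface; older controllers use a simple address/data register
	 * pair.
	 */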
1764	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
1765		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_READ_REQ));
1766
1767		for (idx = 0; idx < retry_cnt; idx++) {
1768			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1769			if ((val & BCE_CTX_CTX_CTRL_READ_REQ) == 0)
1770				break;
1771			DELAY(5);
1772		}
1773
1774		if (val & BCE_CTX_CTX_CTRL_READ_REQ)
1775			BCE_PRINTF("%s(%d); Unable to read CTX memory: "
1776			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
1777			    __FILE__, __LINE__, cid_addr, ctx_offset);
1778
1779		val = REG_RD(sc, BCE_CTX_CTX_DATA);
1780	} else {
1781		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1782		val = REG_RD(sc, BCE_CTX_DATA);
1783	}
1784
1785	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1786		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, val);
1787
1788	return(val);
1789}
1790#endif
1791
1792/****************************************************************************/
1793/* Context memory write.                                                    */
1794/*                                                                          */
1795/* The NetXtreme II controller uses context memory to track connection      */
1796/* information for L2 and higher network protocols.                         */
1797/*                                                                          */
1798/* Returns:                                                                 */
1799/*   Nothing.                                                               */
1800/****************************************************************************/
1801static void
1802bce_ctx_wr(struct bce_softc *sc, u32 cid_addr, u32 ctx_offset, u32 ctx_val)
1803{
1804	u32 idx, offset = ctx_offset + cid_addr;
1805	u32 val, retry_cnt = 5;
1806
1807	DBPRINT(sc, BCE_EXTREME_CTX, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
1808		"val = 0x%08X\n", __FUNCTION__, cid_addr, ctx_offset, ctx_val);
1809
1810	DBRUNIF((cid_addr > MAX_CID_ADDR || ctx_offset & 0x3 || cid_addr & CTX_MASK),
1811		BCE_PRINTF("%s(): Invalid CID address: 0x%08X.\n",
1812		    __FUNCTION__, cid_addr));
1813
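	/*
	 * As with reads, the 5709 posts a write request and polls for
	 * completion; older controllers use a simple address/data
	 * register pair.
	 */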
1814	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
1815		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1816		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1817
1818		for (idx = 0; idx < retry_cnt; idx++) {
1819			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1820			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1821				break;
1822			DELAY(5);
1823		}
1824
1825		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ)
1826			BCE_PRINTF("%s(%d); Unable to write CTX memory: "
1827			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
1828			    __FILE__, __LINE__, cid_addr, ctx_offset);
1829
1830	} else {
1831		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1832		REG_WR(sc, BCE_CTX_DATA, ctx_val);
1833	}
1834}
1835
1836/****************************************************************************/
1837/* PHY register read.                                                       */
1838/*                                                                          */
1839/* Implements register reads on the MII bus.                                */
1840/*                                                                          */
1841/* Returns:                                                                 */
1842/*   The value of the register.                                             */
1843/****************************************************************************/
1844static int
1845bce_miibus_read_reg(device_t dev, int phy, int reg)
1846{
1847	struct bce_softc *sc;
1848	u32 val;
1849	int i;
1850
1851	sc = device_get_softc(dev);
1852
1853    /*
1854     * The 5709S PHY is an IEEE Clause 45 PHY
1855     * with special mappings to work with IEEE
1856     * Clause 22 register accesses.
1857     */
1858	if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1859		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1860			reg += 0x10;
1861	}
1862
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1864		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1865		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1866
1867		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1868		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1869
1870		DELAY(40);
1871	}
1872
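	/* Build and issue the MDIO read command, then poll for completion. */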
1873	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1874	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1875	    BCE_EMAC_MDIO_COMM_START_BUSY;
1876	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1877
1878	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1879		DELAY(10);
1880
1881		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1882		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1883			DELAY(5);
1884
1885			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1886			val &= BCE_EMAC_MDIO_COMM_DATA;
1887
1888			break;
1889		}
1890	}
1891
1892	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1893		BCE_PRINTF("%s(%d): Error: PHY read timeout! phy = %d, "
1894		    "reg = 0x%04X\n", __FILE__, __LINE__, phy, reg);
1895		val = 0x0;
1896	} else {
1897		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1898	}
1899
1900	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1901		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1902		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1903
1904		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1905		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1906
1907		DELAY(40);
1908	}
1909
1910	DB_PRINT_PHY_REG(reg, val);
1911	return (val & 0xffff);
1912}
1913
1914/****************************************************************************/
1915/* PHY register write.                                                      */
1916/*                                                                          */
1917/* Implements register writes on the MII bus.                               */
1918/*                                                                          */
1919/* Returns:                                                                 */
/*   0 on success.                                                          */
1921/****************************************************************************/
1922static int
1923bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1924{
1925	struct bce_softc *sc;
1926	u32 val1;
1927	int i;
1928
1929	sc = device_get_softc(dev);
1930
1931	DB_PRINT_PHY_REG(reg, val);
1932
1933	/*
1934	 * The 5709S PHY is an IEEE Clause 45 PHY
1935	 * with special mappings to work with IEEE
1936	 * Clause 22 register accesses.
1937	 */
1938	if ((sc->bce_phy_flags & BCE_PHY_IEEE_CLAUSE_45_FLAG) != 0) {
1939		if (reg >= MII_BMCR && reg <= MII_ANLPRNP)
1940			reg += 0x10;
1941	}
1942
1943	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1944		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1945		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1946
1947		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1948		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1949
1950		DELAY(40);
1951	}
1952
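	/* Build and issue the MDIO write command, then poll for completion. */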
1953	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1954	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1955	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1956	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1957
1958	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1959		DELAY(10);
1960
1961		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1962		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1963			DELAY(5);
1964			break;
1965		}
1966	}
1967
1968	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1969		BCE_PRINTF("%s(%d): PHY write timeout!\n",
1970		    __FILE__, __LINE__);
1971
1972	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1973		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1974		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1975
1976		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1977		REG_RD(sc, BCE_EMAC_MDIO_MODE);
1978
1979		DELAY(40);
1980	}
1981
1982	return 0;
1983}
1984
1985/****************************************************************************/
1986/* MII bus status change.                                                   */
1987/*                                                                          */
1988/* Called by the MII bus driver when the PHY establishes link to set the    */
1989/* MAC interface registers.                                                 */
1990/*                                                                          */
1991/* Returns:                                                                 */
1992/*   Nothing.                                                               */
1993/****************************************************************************/
1994static void
1995bce_miibus_statchg(device_t dev)
1996{
1997	struct bce_softc *sc;
1998	struct mii_data *mii;
1999	struct ifmediareq ifmr;
2000	int media_active, media_status, val;
2001
2002	sc = device_get_softc(dev);
2003
2004	DBENTER(BCE_VERBOSE_PHY);
2005
2006	if ((sc->bce_phy_flags & BCE_PHY_REMOTE_CAP_FLAG) != 0) {
2007		bzero(&ifmr, sizeof(ifmr));
2008		bce_ifmedia_sts_rphy(sc, &ifmr);
2009		media_active = ifmr.ifm_active;
2010		media_status = ifmr.ifm_status;
2011	} else {
2012		mii = device_get_softc(sc->bce_miibus);
2013		media_active = mii->mii_media_active;
2014		media_status = mii->mii_media_status;
2015	}
2016
2017	/* Ignore invalid media status. */
2018	if ((media_status & (IFM_ACTIVE | IFM_AVALID)) !=
2019	    (IFM_ACTIVE | IFM_AVALID))
2020		goto bce_miibus_statchg_exit;
2021
2022	val = REG_RD(sc, BCE_EMAC_MODE);
2023	val &= ~(BCE_EMAC_MODE_PORT | BCE_EMAC_MODE_HALF_DUPLEX |
2024	    BCE_EMAC_MODE_MAC_LOOP | BCE_EMAC_MODE_FORCE_LINK |
2025	    BCE_EMAC_MODE_25G);
2026
2027	/* Set MII or GMII interface based on the PHY speed. */
2028	switch (IFM_SUBTYPE(media_active)) {
2029	case IFM_10_T:
2030		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
2031			DBPRINT(sc, BCE_INFO_PHY,
2032			    "Enabling 10Mb interface.\n");
2033			val |= BCE_EMAC_MODE_PORT_MII_10;
2034			break;
2035		}
2036		/* fall-through */
2037	case IFM_100_TX:
2038		DBPRINT(sc, BCE_INFO_PHY, "Enabling MII interface.\n");
2039		val |= BCE_EMAC_MODE_PORT_MII;
2040		break;
2041	case IFM_2500_SX:
2042		DBPRINT(sc, BCE_INFO_PHY, "Enabling 2.5G MAC mode.\n");
2043		val |= BCE_EMAC_MODE_25G;
2044		/* fall-through */
2045	case IFM_1000_T:
2046	case IFM_1000_SX:
2047		DBPRINT(sc, BCE_INFO_PHY, "Enabling GMII interface.\n");
2048		val |= BCE_EMAC_MODE_PORT_GMII;
2049		break;
2050	default:
2051		DBPRINT(sc, BCE_INFO_PHY, "Unknown link speed, enabling "
2052		    "default GMII interface.\n");
2053		val |= BCE_EMAC_MODE_PORT_GMII;
2054	}
2055
2056	/* Set half or full duplex based on PHY settings. */
2057	if ((IFM_OPTIONS(media_active) & IFM_FDX) == 0) {
2058		DBPRINT(sc, BCE_INFO_PHY,
2059		    "Setting Half-Duplex interface.\n");
2060		val |= BCE_EMAC_MODE_HALF_DUPLEX;
2061	} else
2062		DBPRINT(sc, BCE_INFO_PHY,
2063		    "Setting Full-Duplex interface.\n");
2064
2065	REG_WR(sc, BCE_EMAC_MODE, val);
2066
2067	if ((IFM_OPTIONS(media_active) & IFM_ETH_RXPAUSE) != 0) {
2068		DBPRINT(sc, BCE_INFO_PHY,
2069		    "%s(): Enabling RX flow control.\n", __FUNCTION__);
2070		BCE_SETBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
2071		sc->bce_flags |= BCE_USING_RX_FLOW_CONTROL;
2072	} else {
2073		DBPRINT(sc, BCE_INFO_PHY,
2074		    "%s(): Disabling RX flow control.\n", __FUNCTION__);
2075		BCE_CLRBIT(sc, BCE_EMAC_RX_MODE, BCE_EMAC_RX_MODE_FLOW_EN);
2076		sc->bce_flags &= ~BCE_USING_RX_FLOW_CONTROL;
2077	}
2078
2079	if ((IFM_OPTIONS(media_active) & IFM_ETH_TXPAUSE) != 0) {
2080		DBPRINT(sc, BCE_INFO_PHY,
2081		    "%s(): Enabling TX flow control.\n", __FUNCTION__);
2082		BCE_SETBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
2083		sc->bce_flags |= BCE_USING_TX_FLOW_CONTROL;
2084	} else {
2085		DBPRINT(sc, BCE_INFO_PHY,
2086		    "%s(): Disabling TX flow control.\n", __FUNCTION__);
2087		BCE_CLRBIT(sc, BCE_EMAC_TX_MODE, BCE_EMAC_TX_MODE_FLOW_EN);
2088		sc->bce_flags &= ~BCE_USING_TX_FLOW_CONTROL;
2089	}
2090
2091	/* ToDo: Update watermarks in bce_init_rx_context(). */
2092
2093bce_miibus_statchg_exit:
2094	DBEXIT(BCE_VERBOSE_PHY);
2095}
2096
2097/****************************************************************************/
2098/* Acquire NVRAM lock.                                                      */
2099/*                                                                          */
2100/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
/* remaining locks are reserved.                                            */
2103/*                                                                          */
2104/* Returns:                                                                 */
2105/*   0 on success, positive value on failure.                               */
2106/****************************************************************************/
2107static int
2108bce_acquire_nvram_lock(struct bce_softc *sc)
2109{
2110	u32 val;
2111	int j, rc = 0;
2112
2113	DBENTER(BCE_VERBOSE_NVRAM);
2114
2115	/* Request access to the flash interface. */
2116	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
2117	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2118		val = REG_RD(sc, BCE_NVM_SW_ARB);
2119		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
2120			break;
2121
2122		DELAY(5);
2123	}
2124
2125	if (j >= NVRAM_TIMEOUT_COUNT) {
2126		DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
2127		rc = EBUSY;
2128	}
2129
2130	DBEXIT(BCE_VERBOSE_NVRAM);
2131	return (rc);
2132}
2133
2134/****************************************************************************/
2135/* Release NVRAM lock.                                                      */
2136/*                                                                          */
2137/* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
/* remaining locks are reserved.                                            */
2140/*                                                                          */
2141/* Returns:                                                                 */
2142/*   0 on success, positive value on failure.                               */
2143/****************************************************************************/
2144static int
2145bce_release_nvram_lock(struct bce_softc *sc)
2146{
2147	u32 val;
2148	int j, rc = 0;
2149
2150	DBENTER(BCE_VERBOSE_NVRAM);
2151
2152	/*
2153	 * Relinquish nvram interface.
2154	 */
2155	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
2156
2157	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2158		val = REG_RD(sc, BCE_NVM_SW_ARB);
2159		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
2160			break;
2161
2162		DELAY(5);
2163	}
2164
2165	if (j >= NVRAM_TIMEOUT_COUNT) {
2166		DBPRINT(sc, BCE_WARN, "Timeout releasing NVRAM lock!\n");
2167		rc = EBUSY;
2168	}
2169
2170	DBEXIT(BCE_VERBOSE_NVRAM);
2171	return (rc);
2172}
2173
2174#ifdef BCE_NVRAM_WRITE_SUPPORT
2175/****************************************************************************/
2176/* Enable NVRAM write access.                                               */
2177/*                                                                          */
2178/* Before writing to NVRAM the caller must enable NVRAM writes.             */
2179/*                                                                          */
2180/* Returns:                                                                 */
2181/*   0 on success, positive value on failure.                               */
2182/****************************************************************************/
2183static int
2184bce_enable_nvram_write(struct bce_softc *sc)
2185{
2186	u32 val;
2187	int rc = 0;
2188
2189	DBENTER(BCE_VERBOSE_NVRAM);
2190
2191	val = REG_RD(sc, BCE_MISC_CFG);
2192	REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
2193
2194	if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2195		int j;
2196
2197		REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
		REG_WR(sc, BCE_NVM_COMMAND,
		    BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
2199
2200		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2201			DELAY(5);
2202
2203			val = REG_RD(sc, BCE_NVM_COMMAND);
2204			if (val & BCE_NVM_COMMAND_DONE)
2205				break;
2206		}
2207
2208		if (j >= NVRAM_TIMEOUT_COUNT) {
2209			DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
2210			rc = EBUSY;
2211		}
2212	}
2213
	DBEXIT(BCE_VERBOSE_NVRAM);
2215	return (rc);
2216}
2217
2218/****************************************************************************/
2219/* Disable NVRAM write access.                                              */
2220/*                                                                          */
2221/* When the caller is finished writing to NVRAM write access must be        */
2222/* disabled.                                                                */
2223/*                                                                          */
2224/* Returns:                                                                 */
2225/*   Nothing.                                                               */
2226/****************************************************************************/
2227static void
2228bce_disable_nvram_write(struct bce_softc *sc)
2229{
2230	u32 val;
2231
2232	DBENTER(BCE_VERBOSE_NVRAM);
2233
2234	val = REG_RD(sc, BCE_MISC_CFG);
2235	REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
2236
	DBEXIT(BCE_VERBOSE_NVRAM);
}
2240#endif
2241
2242/****************************************************************************/
2243/* Enable NVRAM access.                                                     */
2244/*                                                                          */
2245/* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
2247/*                                                                          */
2248/* Returns:                                                                 */
2249/*   Nothing.                                                               */
2250/****************************************************************************/
2251static void
2252bce_enable_nvram_access(struct bce_softc *sc)
2253{
2254	u32 val;
2255
2256	DBENTER(BCE_VERBOSE_NVRAM);
2257
2258	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
2259	/* Enable both bits, even on read. */
2260	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val |
2261	    BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
2262
2263	DBEXIT(BCE_VERBOSE_NVRAM);
2264}
2265
2266/****************************************************************************/
2267/* Disable NVRAM access.                                                    */
2268/*                                                                          */
2269/* When the caller is finished accessing NVRAM access must be disabled.     */
2270/*                                                                          */
2271/* Returns:                                                                 */
2272/*   Nothing.                                                               */
2273/****************************************************************************/
2274static void
2275bce_disable_nvram_access(struct bce_softc *sc)
2276{
2277	u32 val;
2278
2279	DBENTER(BCE_VERBOSE_NVRAM);
2280
2281	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
2282
2283	/* Disable both bits, even after read. */
2284	REG_WR(sc, BCE_NVM_ACCESS_ENABLE, val &
2285	    ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
2286
2287	DBEXIT(BCE_VERBOSE_NVRAM);
2288}
2289
2290#ifdef BCE_NVRAM_WRITE_SUPPORT
2291/****************************************************************************/
2292/* Erase NVRAM page before writing.                                         */
2293/*                                                                          */
2294/* Non-buffered flash parts require that a page be erased before it is      */
2295/* written.                                                                 */
2296/*                                                                          */
2297/* Returns:                                                                 */
2298/*   0 on success, positive value on failure.                               */
2299/****************************************************************************/
2300static int
2301bce_nvram_erase_page(struct bce_softc *sc, u32 offset)
2302{
2303	u32 cmd;
2304	int j, rc = 0;
2305
2306	DBENTER(BCE_VERBOSE_NVRAM);
2307
2308	/* Buffered flash doesn't require an erase. */
2309	if (sc->bce_flash_info->flags & BCE_NV_BUFFERED)
2310		goto bce_nvram_erase_page_exit;
2311
2312	/* Build an erase command. */
2313	cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
2314	    BCE_NVM_COMMAND_DOIT;
2315
2316	/*
2317	 * Clear the DONE bit separately, set the NVRAM address to erase,
2318	 * and issue the erase command.
2319	 */
2320	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2321	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2322	REG_WR(sc, BCE_NVM_COMMAND, cmd);
2323
2324	/* Wait for completion. */
2325	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2326		u32 val;
2327
2328		DELAY(5);
2329
2330		val = REG_RD(sc, BCE_NVM_COMMAND);
2331		if (val & BCE_NVM_COMMAND_DONE)
2332			break;
2333	}
2334
2335	if (j >= NVRAM_TIMEOUT_COUNT) {
2336		DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
2337		rc = EBUSY;
2338	}
2339
2340bce_nvram_erase_page_exit:
2341	DBEXIT(BCE_VERBOSE_NVRAM);
2342	return (rc);
2343}
2344#endif /* BCE_NVRAM_WRITE_SUPPORT */
2345
2346/****************************************************************************/
2347/* Read a dword (32 bits) from NVRAM.                                       */
2348/*                                                                          */
2349/* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
2350/* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
2351/*                                                                          */
2352/* Returns:                                                                 */
2353/*   0 on success and the 32 bit value read, positive value on failure.     */
2354/****************************************************************************/
2355static int
2356bce_nvram_read_dword(struct bce_softc *sc,
2357    u32 offset, u8 *ret_val, u32 cmd_flags)
2358{
2359	u32 cmd;
2360	int i, rc = 0;
2361
2362	DBENTER(BCE_EXTREME_NVRAM);
2363
2364	/* Build the command word. */
2365	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
2366
2367	/* Calculate the offset for buffered flash if translation is used. */
2368	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2369		offset = ((offset / sc->bce_flash_info->page_size) <<
2370		    sc->bce_flash_info->page_bits) +
2371		    (offset % sc->bce_flash_info->page_size);
2372	}
2373
2374	/*
2375	 * Clear the DONE bit separately, set the address to read,
2376	 * and issue the read.
2377	 */
2378	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2379	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2380	REG_WR(sc, BCE_NVM_COMMAND, cmd);
2381
2382	/* Wait for completion. */
2383	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
2384		u32 val;
2385
2386		DELAY(5);
2387
2388		val = REG_RD(sc, BCE_NVM_COMMAND);
2389		if (val & BCE_NVM_COMMAND_DONE) {
2390			val = REG_RD(sc, BCE_NVM_READ);
2391
2392			val = bce_be32toh(val);
2393			memcpy(ret_val, &val, 4);
2394			break;
2395		}
2396	}
2397
2398	/* Check for errors. */
2399	if (i >= NVRAM_TIMEOUT_COUNT) {
2400		BCE_PRINTF("%s(%d): Timeout error reading NVRAM at "
2401		    "offset 0x%08X!\n",	__FILE__, __LINE__, offset);
2402		rc = EBUSY;
2403	}
2404
2405	DBEXIT(BCE_EXTREME_NVRAM);
2406	return(rc);
2407}
2408
2409#ifdef BCE_NVRAM_WRITE_SUPPORT
2410/****************************************************************************/
2411/* Write a dword (32 bits) to NVRAM.                                        */
2412/*                                                                          */
2413/* Write a 32 bit word to NVRAM.  The caller is assumed to have already     */
2414/* obtained the NVRAM lock, enabled the controller for NVRAM access, and    */
2415/* enabled NVRAM write access.                                              */
2416/*                                                                          */
2417/* Returns:                                                                 */
2418/*   0 on success, positive value on failure.                               */
2419/****************************************************************************/
2420static int
2421bce_nvram_write_dword(struct bce_softc *sc, u32 offset, u8 *val,
2422	u32 cmd_flags)
2423{
2424	u32 cmd, val32;
2425	int j, rc = 0;
2426
2427	DBENTER(BCE_VERBOSE_NVRAM);
2428
2429	/* Build the command word. */
2430	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
2431
2432	/* Calculate the offset for buffered flash if translation is used. */
2433	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
2434		offset = ((offset / sc->bce_flash_info->page_size) <<
2435		    sc->bce_flash_info->page_bits) +
2436		    (offset % sc->bce_flash_info->page_size);
2437	}
2438
2439	/*
2440	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
2441	 * set the NVRAM address to write, and issue the write command
2442	 */
2443	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
2444	memcpy(&val32, val, 4);
2445	val32 = htobe32(val32);
2446	REG_WR(sc, BCE_NVM_WRITE, val32);
2447	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
2448	REG_WR(sc, BCE_NVM_COMMAND, cmd);
2449
2450	/* Wait for completion. */
2451	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2452		DELAY(5);
2453
2454		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
2455			break;
2456	}
2457	if (j >= NVRAM_TIMEOUT_COUNT) {
2458		BCE_PRINTF("%s(%d): Timeout error writing NVRAM at "
2459		    "offset 0x%08X\n", __FILE__, __LINE__, offset);
2460		rc = EBUSY;
2461	}
2462
2463	DBEXIT(BCE_VERBOSE_NVRAM);
2464	return (rc);
2465}
2466#endif /* BCE_NVRAM_WRITE_SUPPORT */
2467
2468/****************************************************************************/
2469/* Initialize NVRAM access.                                                 */
2470/*                                                                          */
2471/* Identify the NVRAM device in use and prepare the NVRAM interface to      */
2472/* access that device.                                                      */
2473/*                                                                          */
2474/* Returns:                                                                 */
2475/*   0 on success, positive value on failure.                               */
2476/****************************************************************************/
2477static int
2478bce_init_nvram(struct bce_softc *sc)
2479{
2480	u32 val;
2481	int j, entry_count, rc = 0;
2482	const struct flash_spec *flash;
2483
2484	DBENTER(BCE_VERBOSE_NVRAM);
2485
2486	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
2487		sc->bce_flash_info = &flash_5709;
2488		goto bce_init_nvram_get_flash_size;
2489	}
2490
2491	/* Determine the selected interface. */
2492	val = REG_RD(sc, BCE_NVM_CFG1);
2493
2494	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2495
2496	/*
2497	 * Flash reconfiguration is required to support additional
2498	 * NVRAM devices not directly supported in hardware.
2499	 * Check if the flash interface was reconfigured
2500	 * by the bootcode.
2501	 */
2502
2503	if (val & 0x40000000) {
2504		/* Flash interface reconfigured by bootcode. */
2505
		DBPRINT(sc, BCE_INFO_LOAD,
2507			"bce_init_nvram(): Flash WAS reconfigured.\n");
2508
2509		for (j = 0, flash = &flash_table[0]; j < entry_count;
2510		     j++, flash++) {
2511			if ((val & FLASH_BACKUP_STRAP_MASK) ==
2512			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2513				sc->bce_flash_info = flash;
2514				break;
2515			}
2516		}
2517	} else {
2518		/* Flash interface not yet reconfigured. */
2519		u32 mask;
2520
2521		DBPRINT(sc, BCE_INFO_LOAD, "%s(): Flash was NOT reconfigured.\n",
2522			__FUNCTION__);
2523
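		/* Select between the primary and backup strapping masks. */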
2524		if (val & (1 << 23))
2525			mask = FLASH_BACKUP_STRAP_MASK;
2526		else
2527			mask = FLASH_STRAP_MASK;
2528
2529		/* Look for the matching NVRAM device configuration data. */
2530		for (j = 0, flash = &flash_table[0]; j < entry_count; j++, flash++) {
2531			/* Check if the device matches any of the known devices. */
2532			if ((val & mask) == (flash->strapping & mask)) {
2533				/* Found a device match. */
2534				sc->bce_flash_info = flash;
2535
2536				/* Request access to the flash interface. */
2537				if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2538					return rc;
2539
2540				/* Reconfigure the flash interface. */
2541				bce_enable_nvram_access(sc);
2542				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
2543				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
2544				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
2545				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
2546				bce_disable_nvram_access(sc);
2547				bce_release_nvram_lock(sc);
2548
2549				break;
2550			}
2551		}
2552	}
2553
2554	/* Check if a matching device was found. */
2555	if (j == entry_count) {
2556		sc->bce_flash_info = NULL;
2557		BCE_PRINTF("%s(%d): Unknown Flash NVRAM found!\n",
2558		    __FILE__, __LINE__);
2559		DBEXIT(BCE_VERBOSE_NVRAM);
2560		return (ENODEV);
2561	}
2562
2563bce_init_nvram_get_flash_size:
2564	/* Write the flash config data to the shared memory interface. */
2565	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2);
2566	val &= BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
2567	if (val)
2568		sc->bce_flash_size = val;
2569	else
2570		sc->bce_flash_size = sc->bce_flash_info->total_size;
2571
2572	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Found %s, size = 0x%08X\n",
2573	    __FUNCTION__, sc->bce_flash_info->name,
2574	    sc->bce_flash_info->total_size);
2575
2576	DBEXIT(BCE_VERBOSE_NVRAM);
2577	return rc;
2578}
2579
2580/****************************************************************************/
2581/* Read an arbitrary range of data from NVRAM.                              */
2582/*                                                                          */
2583/* Prepares the NVRAM interface for access and reads the requested data     */
2584/* into the supplied buffer.                                                */
2585/*                                                                          */
2586/* Returns:                                                                 */
2587/*   0 on success and the data read, positive value on failure.             */
2588/****************************************************************************/
2589static int
2590bce_nvram_read(struct bce_softc *sc, u32 offset, u8 *ret_buf,
2591	int buf_size)
2592{
2593	int rc = 0;
2594	u32 cmd_flags, offset32, len32, extra;
2595
2596	DBENTER(BCE_VERBOSE_NVRAM);
2597
2598	if (buf_size == 0)
2599		goto bce_nvram_read_exit;
2600
2601	/* Request access to the flash interface. */
2602	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2603		goto bce_nvram_read_exit;
2604
2605	/* Enable access to flash interface */
2606	bce_enable_nvram_access(sc);
2607
2608	len32 = buf_size;
2609	offset32 = offset;
2610	extra = 0;
2611
2612	cmd_flags = 0;
2613
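	/*
	 * Handle a starting offset that is not dword aligned by reading
	 * the enclosing dword and copying only the requested bytes.
	 */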
2614	if (offset32 & 3) {
2615		u8 buf[4];
2616		u32 pre_len;
2617
2618		offset32 &= ~3;
2619		pre_len = 4 - (offset & 3);
2620
2621		if (pre_len >= len32) {
2622			pre_len = len32;
2623			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
2624		}
2625		else {
2626			cmd_flags = BCE_NVM_COMMAND_FIRST;
2627		}
2628
2629		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2630
		if (rc)
			goto bce_nvram_read_locked_exit;
2633
2634		memcpy(ret_buf, buf + (offset & 3), pre_len);
2635
2636		offset32 += 4;
2637		ret_buf += pre_len;
2638		len32 -= pre_len;
2639	}
2640
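	/*
	 * Round the remaining length up to a dword boundary; 'extra' counts
	 * the trailing bytes to discard from the last dword read.
	 */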
2641	if (len32 & 3) {
2642		extra = 4 - (len32 & 3);
2643		len32 = (len32 + 4) & ~3;
2644	}
2645
2646	if (len32 == 4) {
2647		u8 buf[4];
2648
2649		if (cmd_flags)
2650			cmd_flags = BCE_NVM_COMMAND_LAST;
2651		else
2652			cmd_flags = BCE_NVM_COMMAND_FIRST |
2653				    BCE_NVM_COMMAND_LAST;
2654
2655		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2656
2657		memcpy(ret_buf, buf, 4 - extra);
2658	}
2659	else if (len32 > 0) {
2660		u8 buf[4];
2661
2662		/* Read the first word. */
2663		if (cmd_flags)
2664			cmd_flags = 0;
2665		else
2666			cmd_flags = BCE_NVM_COMMAND_FIRST;
2667
2668		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
2669
2670		/* Advance to the next dword. */
2671		offset32 += 4;
2672		ret_buf += 4;
2673		len32 -= 4;
2674
2675		while (len32 > 4 && rc == 0) {
2676			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
2677
2678			/* Advance to the next dword. */
2679			offset32 += 4;
2680			ret_buf += 4;
2681			len32 -= 4;
2682		}
2683
2684		if (rc)
2685			goto bce_nvram_read_locked_exit;
2686
2687		cmd_flags = BCE_NVM_COMMAND_LAST;
2688		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
2689
2690		memcpy(ret_buf, buf, 4 - extra);
2691	}
2692
2693bce_nvram_read_locked_exit:
2694	/* Disable access to flash interface and release the lock. */
2695	bce_disable_nvram_access(sc);
2696	bce_release_nvram_lock(sc);
2697
2698bce_nvram_read_exit:
2699	DBEXIT(BCE_VERBOSE_NVRAM);
2700	return rc;
2701}
2702
2703#ifdef BCE_NVRAM_WRITE_SUPPORT
2704/****************************************************************************/
/* Write an arbitrary range of data to NVRAM.                               */
2706/*                                                                          */
2707/* Prepares the NVRAM interface for write access and writes the requested   */
2708/* data from the supplied buffer.  The caller is responsible for            */
2709/* calculating any appropriate CRCs.                                        */
2710/*                                                                          */
2711/* Returns:                                                                 */
2712/*   0 on success, positive value on failure.                               */
2713/****************************************************************************/
2714static int
2715bce_nvram_write(struct bce_softc *sc, u32 offset, u8 *data_buf,
2716	int buf_size)
2717{
2718	u32 written, offset32, len32;
2719	u8 *buf, start[4], end[4];
2720	int rc = 0;
2721	int align_start, align_end;
2722
2723	DBENTER(BCE_VERBOSE_NVRAM);
2724
2725	buf = data_buf;
2726	offset32 = offset;
2727	len32 = buf_size;
2728	align_start = align_end = 0;
2729
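	/*
	 * Widen the write to dword-aligned boundaries, preserving the
	 * existing NVRAM contents surrounding the caller's range.
	 */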
2730	if ((align_start = (offset32 & 3))) {
2731		offset32 &= ~3;
2732		len32 += align_start;
2733		if ((rc = bce_nvram_read(sc, offset32, start, 4)))
2734			goto bce_nvram_write_exit;
2735	}
2736
2737	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
2739			align_end = 4 - (len32 & 3);
2740			len32 += align_end;
2741			if ((rc = bce_nvram_read(sc, offset32 + len32 - 4,
2742				end, 4))) {
2743				goto bce_nvram_write_exit;
2744			}
2745		}
2746	}
2747
2748	if (align_start || align_end) {
2749		buf = malloc(len32, M_DEVBUF, M_NOWAIT);
2750		if (buf == NULL) {
2751			rc = ENOMEM;
2752			goto bce_nvram_write_exit;
2753		}
2754
2755		if (align_start) {
2756			memcpy(buf, start, 4);
2757		}
2758
2759		if (align_end) {
2760			memcpy(buf + len32 - 4, end, 4);
2761		}
2762		memcpy(buf + align_start, data_buf, buf_size);
2763	}
2764
2765	written = 0;
2766	while ((written < len32) && (rc == 0)) {
2767		u32 page_start, page_end, data_start, data_end;
2768		u32 addr, cmd_flags;
2769		int i;
2770		u8 flash_buffer[264];
2771
		/* Find the page_start addr */
2773		page_start = offset32 + written;
2774		page_start -= (page_start % sc->bce_flash_info->page_size);
2775		/* Find the page_end addr */
2776		page_end = page_start + sc->bce_flash_info->page_size;
2777		/* Find the data_start addr */
2778		data_start = (written == 0) ? offset32 : page_start;
2779		/* Find the data_end addr */
2780		data_end = (page_end > offset32 + len32) ?
2781			(offset32 + len32) : page_end;
2782
2783		/* Request access to the flash interface. */
2784		if ((rc = bce_acquire_nvram_lock(sc)) != 0)
2785			goto bce_nvram_write_exit;
2786
2787		/* Enable access to flash interface */
2788		bce_enable_nvram_access(sc);
2789
2790		cmd_flags = BCE_NVM_COMMAND_FIRST;
2791		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2792			int j;
2793
			/* Read the whole page into the buffer
			 * (non-buffered flash only). */
2796			for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
2797				if (j == (sc->bce_flash_info->page_size - 4)) {
2798					cmd_flags |= BCE_NVM_COMMAND_LAST;
2799				}
2800				rc = bce_nvram_read_dword(sc,
2801					page_start + j,
2802					&flash_buffer[j],
2803					cmd_flags);
2804
2805				if (rc)
2806					goto bce_nvram_write_locked_exit;
2807
2808				cmd_flags = 0;
2809			}
2810		}
2811
2812		/* Enable writes to flash interface (unlock write-protect) */
2813		if ((rc = bce_enable_nvram_write(sc)) != 0)
2814			goto bce_nvram_write_locked_exit;
2815
2816		/* Erase the page */
2817		if ((rc = bce_nvram_erase_page(sc, page_start)) != 0)
2818			goto bce_nvram_write_locked_exit;
2819
2820		/* Re-enable the write again for the actual write */
2821		bce_enable_nvram_write(sc);
2822
2823		/* Loop to write back the buffer data from page_start to
2824		 * data_start */
2825		i = 0;
2826		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2827			for (addr = page_start; addr < data_start;
2828				addr += 4, i += 4) {
2829				rc = bce_nvram_write_dword(sc, addr,
2830					&flash_buffer[i], cmd_flags);
2831
2832				if (rc != 0)
2833					goto bce_nvram_write_locked_exit;
2834
2835				cmd_flags = 0;
2836			}
2837		}
2838
2839		/* Loop to write the new data from data_start to data_end */
2840		for (addr = data_start; addr < data_end; addr += 4, i++) {
2841			if ((addr == page_end - 4) ||
2842				((sc->bce_flash_info->flags & BCE_NV_BUFFERED) &&
2843				(addr == data_end - 4))) {
2844				cmd_flags |= BCE_NVM_COMMAND_LAST;
2845			}
2846			rc = bce_nvram_write_dword(sc, addr, buf,
2847				cmd_flags);
2848
2849			if (rc != 0)
2850				goto bce_nvram_write_locked_exit;
2851
2852			cmd_flags = 0;
2853			buf += 4;
2854		}
2855
2856		/* Loop to write back the buffer data from data_end
2857		 * to page_end */
2858		if (!(sc->bce_flash_info->flags & BCE_NV_BUFFERED)) {
2859			for (addr = data_end; addr < page_end;
2860				addr += 4, i += 4) {
				if (addr == page_end - 4) {
					cmd_flags = BCE_NVM_COMMAND_LAST;
				}
2864				rc = bce_nvram_write_dword(sc, addr,
2865					&flash_buffer[i], cmd_flags);
2866
2867				if (rc != 0)
2868					goto bce_nvram_write_locked_exit;
2869
2870				cmd_flags = 0;
2871			}
2872		}
2873
2874		/* Disable writes to flash interface (lock write-protect) */
2875		bce_disable_nvram_write(sc);
2876
2877		/* Disable access to flash interface */
2878		bce_disable_nvram_access(sc);
2879		bce_release_nvram_lock(sc);
2880
2881		/* Increment written */
2882		written += data_end - data_start;
2883	}
2884
2885	goto bce_nvram_write_exit;
2886
2887bce_nvram_write_locked_exit:
2888	bce_disable_nvram_write(sc);
2889	bce_disable_nvram_access(sc);
2890	bce_release_nvram_lock(sc);
2891
2892bce_nvram_write_exit:
2893	if (align_start || align_end)
2894		free(buf, M_DEVBUF);
2895
2896	DBEXIT(BCE_VERBOSE_NVRAM);
2897	return (rc);
2898}
2899#endif /* BCE_NVRAM_WRITE_SUPPORT */
2900
2901/****************************************************************************/
2902/* Verifies that NVRAM is accessible and contains valid data.               */
2903/*                                                                          */
2904/* Reads the configuration data from NVRAM and verifies that the CRC is     */
2905/* correct.                                                                 */
2906/*                                                                          */
2907/* Returns:                                                                 */
2908/*   0 on success, positive value on failure.                               */
2909/****************************************************************************/
2910static int
2911bce_nvram_test(struct bce_softc *sc)
2912{
2913	u32 buf[BCE_NVRAM_SIZE / 4];
2914	u8 *data = (u8 *) buf;
2915	int rc = 0;
2916	u32 magic, csum;
2917
2918	DBENTER(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2919
2920	/*
2921	 * Check that the device NVRAM is valid by reading
2922	 * the magic value at offset 0.
2923	 */
2924	if ((rc = bce_nvram_read(sc, 0, data, 4)) != 0) {
2925		BCE_PRINTF("%s(%d): Unable to read NVRAM!\n",
2926		    __FILE__, __LINE__);
2927		goto bce_nvram_test_exit;
2928	}
2929
2930	/*
2931	 * Verify that offset 0 of the NVRAM contains
2932	 * a valid magic number.
2933	 */
2934	magic = bce_be32toh(buf[0]);
2935	if (magic != BCE_NVRAM_MAGIC) {
2936		rc = ENODEV;
2937		BCE_PRINTF("%s(%d): Invalid NVRAM magic value! "
2938		    "Expected: 0x%08X, Found: 0x%08X\n",
2939		    __FILE__, __LINE__, BCE_NVRAM_MAGIC, magic);
2940		goto bce_nvram_test_exit;
2941	}
2942
2943	/*
2944	 * Verify that the device NVRAM includes valid
2945	 * configuration data.
2946	 */
2947	if ((rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE)) != 0) {
2948		BCE_PRINTF("%s(%d): Unable to read manufacturing "
2949		    "Information from  NVRAM!\n", __FILE__, __LINE__);
2950		goto bce_nvram_test_exit;
2951	}
2952
2953	csum = ether_crc32_le(data, 0x100);
2954	if (csum != BCE_CRC32_RESIDUAL) {
2955		rc = ENODEV;
2956		BCE_PRINTF("%s(%d): Invalid manufacturing information "
2957		    "NVRAM CRC!	Expected: 0x%08X, Found: 0x%08X\n",
2958		    __FILE__, __LINE__, BCE_CRC32_RESIDUAL, csum);
2959		goto bce_nvram_test_exit;
2960	}
2961
2962	csum = ether_crc32_le(data + 0x100, 0x100);
2963	if (csum != BCE_CRC32_RESIDUAL) {
2964		rc = ENODEV;
2965		BCE_PRINTF("%s(%d): Invalid feature configuration "
2966		    "information NVRAM CRC! Expected: 0x%08X, "
2967		    "Found: 08%08X\n", __FILE__, __LINE__,
2968		    BCE_CRC32_RESIDUAL, csum);
2969	}
2970
2971bce_nvram_test_exit:
2972	DBEXIT(BCE_VERBOSE_NVRAM | BCE_VERBOSE_LOAD | BCE_VERBOSE_RESET);
2973	return rc;
2974}
2975
2976/****************************************************************************/
2977/* Calculates the size of the buffers to allocate based on the MTU.         */
2978/*                                                                          */
2979/* Returns:                                                                 */
2980/*   Nothing.                                                               */
2981/****************************************************************************/
2982static void
2983bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
2984{
2985	DBENTER(BCE_VERBOSE_LOAD);
2986
2987	/* Use a single allocation type when header splitting enabled. */
2988	if (bce_hdr_split == TRUE) {
2989		sc->rx_bd_mbuf_alloc_size = MHLEN;
2990		/* Make sure offset is 16 byte aligned for hardware. */
2991		sc->rx_bd_mbuf_align_pad =
2992			roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN);
2993		sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
2994			sc->rx_bd_mbuf_align_pad;
2995	} else {
2996		if ((mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2997		    ETHER_CRC_LEN) > MCLBYTES) {
2998			/* Setup for jumbo RX buffer allocations. */
2999			sc->rx_bd_mbuf_alloc_size = MJUM9BYTES;
3000			sc->rx_bd_mbuf_align_pad  =
3001				roundup2(MJUM9BYTES, 16) - MJUM9BYTES;
3002			sc->rx_bd_mbuf_data_len =
3003			    sc->rx_bd_mbuf_alloc_size -
3004			    sc->rx_bd_mbuf_align_pad;
3005		} else {
3006			/* Setup for standard RX buffer allocations. */
3007			sc->rx_bd_mbuf_alloc_size = MCLBYTES;
3008			sc->rx_bd_mbuf_align_pad  =
3009			    roundup2(MCLBYTES, 16) - MCLBYTES;
3010			sc->rx_bd_mbuf_data_len =
3011			    sc->rx_bd_mbuf_alloc_size -
3012			    sc->rx_bd_mbuf_align_pad;
3013		}
3014	}
3015
3016//	DBPRINT(sc, BCE_INFO_LOAD,
3017	DBPRINT(sc, BCE_WARN,
3018	   "%s(): rx_bd_mbuf_alloc_size = %d, rx_bd_mbuf_data_len = %d, "
3019	   "rx_bd_mbuf_align_pad = %d\n", __FUNCTION__,
3020	   sc->rx_bd_mbuf_alloc_size, sc->rx_bd_mbuf_data_len,
3021	   sc->rx_bd_mbuf_align_pad);
3022
3023	DBEXIT(BCE_VERBOSE_LOAD);
3024}
3025
3026/****************************************************************************/
3027/* Identifies the current media type of the controller and sets the PHY     */
3028/* address.                                                                 */
3029/*                                                                          */
3030/* Returns:                                                                 */
3031/*   Nothing.                                                               */
3032/****************************************************************************/
3033static void
3034bce_get_media(struct bce_softc *sc)
3035{
3036	u32 val;
3037
3038	DBENTER(BCE_VERBOSE_PHY);
3039
3040	/* Assume PHY address for copper controllers. */
3041	sc->bce_phy_addr = 1;
3042
3043	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
3045		u32 bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
3046		u32 strap;
3047
3048		/*
3049		 * The BCM5709S is software configurable
3050		 * for Copper or SerDes operation.
3051		 */
3052		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
3053			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
3054			    "for copper.\n");
3055			goto bce_get_media_exit;
3056		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
3057			DBPRINT(sc, BCE_INFO_LOAD, "5709 bonded "
3058			    "for dual media.\n");
3059			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
3060			goto bce_get_media_exit;
3061		}
3062
3063		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
3064			strap = (val &
3065			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
3066		else
3067			strap = (val &
3068			    BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
3069
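		/* The strap encoding differs between PCI functions 0 and 1. */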
3070		if (pci_get_function(sc->bce_dev) == 0) {
3071			switch (strap) {
3072			case 0x4:
3073			case 0x5:
3074			case 0x6:
3075				DBPRINT(sc, BCE_INFO_LOAD,
3076				    "BCM5709 s/w configured for SerDes.\n");
3077				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
3078				break;
3079			default:
3080				DBPRINT(sc, BCE_INFO_LOAD,
3081				    "BCM5709 s/w configured for Copper.\n");
3082				break;
3083			}
3084		} else {
3085			switch (strap) {
3086			case 0x1:
3087			case 0x2:
3088			case 0x4:
3089				DBPRINT(sc, BCE_INFO_LOAD,
3090				    "BCM5709 s/w configured for SerDes.\n");
3091				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
3092				break;
3093			default:
3094				DBPRINT(sc, BCE_INFO_LOAD,
3095				    "BCM5709 s/w configured for Copper.\n");
3096				break;
3097			}
3098		}
3099
3100	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT)
3101		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
3102
3103	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
3104		sc->bce_flags |= BCE_NO_WOL_FLAG;
3105
3106		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
3107			sc->bce_phy_flags |= BCE_PHY_IEEE_CLAUSE_45_FLAG;
3108
3109		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
3110			/* 5708S/09S/16S use a separate PHY for SerDes. */
3111			sc->bce_phy_addr = 2;
3112
3113			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
3114			if (val & BCE_SHARED_HW_CFG_PHY_2_5G) {
3115				sc->bce_phy_flags |=
3116				    BCE_PHY_2_5G_CAPABLE_FLAG;
3117				DBPRINT(sc, BCE_INFO_LOAD, "Found 2.5Gb "
3118				    "capable adapter\n");
3119			}
3120		}
3121	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
3122	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708))
3123		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
3124
3125bce_get_media_exit:
3126	DBPRINT(sc, (BCE_INFO_LOAD | BCE_INFO_PHY),
3127		"Using PHY address %d.\n", sc->bce_phy_addr);
3128
3129	DBEXIT(BCE_VERBOSE_PHY);
3130}
3131
3132/****************************************************************************/
3133/* Performs PHY initialization required before MII drivers access the       */
3134/* device.                                                                  */
3135/*                                                                          */
3136/* Returns:                                                                 */
3137/*   Nothing.                                                               */
3138/****************************************************************************/
3139static void
3140bce_init_media(struct bce_softc *sc)
3141{
3142	if ((sc->bce_phy_flags & (BCE_PHY_IEEE_CLAUSE_45_FLAG |
3143	    BCE_PHY_REMOTE_CAP_FLAG)) == BCE_PHY_IEEE_CLAUSE_45_FLAG) {
3144		/*
3145		 * Configure 5709S/5716S PHYs to use traditional IEEE
3146		 * Clause 22 method. Otherwise we have no way to attach
3147		 * the PHY in mii(4) layer. PHY specific configuration
3148		 * is done in mii layer.
3149		 */
3150
3151		/* Select auto-negotiation MMD of the PHY. */
3152		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
3153		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_ADDR_EXT);
3154		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
3155		    BRGPHY_ADDR_EXT, BRGPHY_ADDR_EXT_AN_MMD);
3156
3157		/* Set IEEE0 block of AN MMD (assumed in brgphy(4) code). */
3158		bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr,
3159		    BRGPHY_BLOCK_ADDR, BRGPHY_BLOCK_ADDR_COMBO_IEEE0);
3160	}
3161}
3162
3163/****************************************************************************/
3164/* Free any DMA memory owned by the driver.                                 */
3165/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
3167/* the memory if allocated.                                                 */
3168/*                                                                          */
3169/* Returns:                                                                 */
3170/*   Nothing.                                                               */
3171/****************************************************************************/
3172static void
3173bce_dma_free(struct bce_softc *sc)
3174{
3175	int i;
3176
3177	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
3178
3179	/* Free, unmap, and destroy the status block. */
3180	if (sc->status_block_paddr != 0) {
3181		bus_dmamap_unload(
3182		    sc->status_tag,
3183		    sc->status_map);
3184		sc->status_block_paddr = 0;
3185	}
3186
3187	if (sc->status_block != NULL) {
3188		bus_dmamem_free(
		    sc->status_tag,
3190		    sc->status_block,
3191		    sc->status_map);
3192		sc->status_block = NULL;
3193	}
3194
3195	if (sc->status_tag != NULL) {
3196		bus_dma_tag_destroy(sc->status_tag);
3197		sc->status_tag = NULL;
3198	}
3199
3200	/* Free, unmap, and destroy the statistics block. */
3201	if (sc->stats_block_paddr != 0) {
3202		bus_dmamap_unload(
3203		    sc->stats_tag,
3204		    sc->stats_map);
3205		sc->stats_block_paddr = 0;
3206	}
3207
3208	if (sc->stats_block != NULL) {
3209		bus_dmamem_free(
3210		    sc->stats_tag,
3211		    sc->stats_block,
3212		    sc->stats_map);
3213		sc->stats_block = NULL;
3214	}
3215
3216	if (sc->stats_tag != NULL) {
3217		bus_dma_tag_destroy(sc->stats_tag);
3218		sc->stats_tag = NULL;
3219	}
3220
3221	/* Free, unmap and destroy all context memory pages. */
3222	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
3223		for (i = 0; i < sc->ctx_pages; i++ ) {
3224			if (sc->ctx_paddr[i] != 0) {
3225				bus_dmamap_unload(
3226				    sc->ctx_tag,
3227				    sc->ctx_map[i]);
3228				sc->ctx_paddr[i] = 0;
3229			}
3230
3231			if (sc->ctx_block[i] != NULL) {
3232				bus_dmamem_free(
3233				    sc->ctx_tag,
3234				    sc->ctx_block[i],
3235				    sc->ctx_map[i]);
3236				sc->ctx_block[i] = NULL;
3237			}
3238		}
3239
3240		/* Destroy the context memory tag. */
3241		if (sc->ctx_tag != NULL) {
3242			bus_dma_tag_destroy(sc->ctx_tag);
3243			sc->ctx_tag = NULL;
3244		}
3245	}
3246
3247	/* Free, unmap and destroy all TX buffer descriptor chain pages. */
3248	for (i = 0; i < sc->tx_pages; i++ ) {
3249		if (sc->tx_bd_chain_paddr[i] != 0) {
3250			bus_dmamap_unload(
3251			    sc->tx_bd_chain_tag,
3252			    sc->tx_bd_chain_map[i]);
3253			sc->tx_bd_chain_paddr[i] = 0;
3254		}
3255
3256		if (sc->tx_bd_chain[i] != NULL) {
3257			bus_dmamem_free(
3258			    sc->tx_bd_chain_tag,
3259			    sc->tx_bd_chain[i],
3260			    sc->tx_bd_chain_map[i]);
3261			sc->tx_bd_chain[i] = NULL;
3262		}
3263	}
3264
3265	/* Destroy the TX buffer descriptor tag. */
3266	if (sc->tx_bd_chain_tag != NULL) {
3267		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
3268		sc->tx_bd_chain_tag = NULL;
3269	}
3270
3271	/* Free, unmap and destroy all RX buffer descriptor chain pages. */
3272	for (i = 0; i < sc->rx_pages; i++ ) {
3273		if (sc->rx_bd_chain_paddr[i] != 0) {
3274			bus_dmamap_unload(
3275			    sc->rx_bd_chain_tag,
3276			    sc->rx_bd_chain_map[i]);
3277			sc->rx_bd_chain_paddr[i] = 0;
3278		}
3279
3280		if (sc->rx_bd_chain[i] != NULL) {
3281			bus_dmamem_free(
3282			    sc->rx_bd_chain_tag,
3283			    sc->rx_bd_chain[i],
3284			    sc->rx_bd_chain_map[i]);
3285			sc->rx_bd_chain[i] = NULL;
3286		}
3287	}
3288
3289	/* Destroy the RX buffer descriptor tag. */
3290	if (sc->rx_bd_chain_tag != NULL) {
3291		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
3292		sc->rx_bd_chain_tag = NULL;
3293	}
3294
3295	/* Free, unmap and destroy all page buffer descriptor chain pages. */
3296	if (bce_hdr_split == TRUE) {
3297		for (i = 0; i < sc->pg_pages; i++ ) {
3298			if (sc->pg_bd_chain_paddr[i] != 0) {
3299				bus_dmamap_unload(
3300				    sc->pg_bd_chain_tag,
3301				    sc->pg_bd_chain_map[i]);
3302				sc->pg_bd_chain_paddr[i] = 0;
3303			}
3304
3305			if (sc->pg_bd_chain[i] != NULL) {
3306				bus_dmamem_free(
3307				    sc->pg_bd_chain_tag,
3308				    sc->pg_bd_chain[i],
3309				    sc->pg_bd_chain_map[i]);
3310				sc->pg_bd_chain[i] = NULL;
3311			}
3312		}
3313
3314		/* Destroy the page buffer descriptor tag. */
3315		if (sc->pg_bd_chain_tag != NULL) {
3316			bus_dma_tag_destroy(sc->pg_bd_chain_tag);
3317			sc->pg_bd_chain_tag = NULL;
3318		}
3319	}
3320
3321	/* Unload and destroy the TX mbuf maps. */
3322	for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
3323		if (sc->tx_mbuf_map[i] != NULL) {
3324			bus_dmamap_unload(sc->tx_mbuf_tag,
3325			    sc->tx_mbuf_map[i]);
3326			bus_dmamap_destroy(sc->tx_mbuf_tag,
			    sc->tx_mbuf_map[i]);
3328			sc->tx_mbuf_map[i] = NULL;
3329		}
3330	}
3331
3332	/* Destroy the TX mbuf tag. */
3333	if (sc->tx_mbuf_tag != NULL) {
3334		bus_dma_tag_destroy(sc->tx_mbuf_tag);
3335		sc->tx_mbuf_tag = NULL;
3336	}
3337
3338	/* Unload and destroy the RX mbuf maps. */
3339	for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
3340		if (sc->rx_mbuf_map[i] != NULL) {
3341			bus_dmamap_unload(sc->rx_mbuf_tag,
3342			    sc->rx_mbuf_map[i]);
3343			bus_dmamap_destroy(sc->rx_mbuf_tag,
			    sc->rx_mbuf_map[i]);
3345			sc->rx_mbuf_map[i] = NULL;
3346		}
3347	}
3348
3349	/* Destroy the RX mbuf tag. */
3350	if (sc->rx_mbuf_tag != NULL) {
3351		bus_dma_tag_destroy(sc->rx_mbuf_tag);
3352		sc->rx_mbuf_tag = NULL;
3353	}
3354
3355	/* Unload and destroy the page mbuf maps. */
3356	if (bce_hdr_split == TRUE) {
3357		for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
3358			if (sc->pg_mbuf_map[i] != NULL) {
3359				bus_dmamap_unload(sc->pg_mbuf_tag,
3360				    sc->pg_mbuf_map[i]);
3361				bus_dmamap_destroy(sc->pg_mbuf_tag,
3362				    sc->pg_mbuf_map[i]);
3363				sc->pg_mbuf_map[i] = NULL;
3364			}
3365		}
3366
3367		/* Destroy the page mbuf tag. */
3368		if (sc->pg_mbuf_tag != NULL) {
3369			bus_dma_tag_destroy(sc->pg_mbuf_tag);
3370			sc->pg_mbuf_tag = NULL;
3371		}
3372	}
3373
3374	/* Destroy the parent tag */
3375	if (sc->parent_tag != NULL) {
3376		bus_dma_tag_destroy(sc->parent_tag);
3377		sc->parent_tag = NULL;
3378	}
3379
3380	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_UNLOAD | BCE_VERBOSE_CTX);
3381}
3382
3383/****************************************************************************/
3384/* Get DMA memory from the OS.                                              */
3385/*                                                                          */
3386/* Validates that the OS has provided DMA buffers in response to a          */
3387/* bus_dmamap_load() call and saves the physical address of those buffers.  */
/* When the callback is used, bus_dmamap_load() itself may return 0, so     */
/* the callback signals failure by storing a bus address of 0 through the   */
/* caller-supplied pointer.                                                 */
3391/*                                                                          */
3392/* Returns:                                                                 */
3393/*   Nothing.                                                               */
3394/****************************************************************************/
3395static void
3396bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3397{
3398	bus_addr_t *busaddr = arg;
3399
3400	KASSERT(nseg == 1, ("%s(): Too many segments returned (%d)!",
3401	    __FUNCTION__, nseg));
3402	/* Simulate a mapping failure. */
3403	DBRUNIF(DB_RANDOMTRUE(dma_map_addr_failed_sim_control),
3404	    error = ENOMEM);
3405
3406	/* ToDo: How to increment debug sim_count variable here? */
3407
3408	/* Check for an error and signal the caller that an error occurred. */
3409	if (error) {
3410		*busaddr = 0;
3411	} else {
3412		*busaddr = segs->ds_addr;
3413	}
3414}
3415
3416/****************************************************************************/
3417/* Allocate any DMA memory needed by the driver.                            */
3418/*                                                                          */
3419/* Allocates DMA memory needed for the various global structures needed by  */
3420/* hardware.                                                                */
3421/*                                                                          */
3422/* Memory alignment requirements:                                           */
3423/* +-----------------+----------+----------+----------+----------+          */
3424/* |                 |   5706   |   5708   |   5709   |   5716   |          */
3425/* +-----------------+----------+----------+----------+----------+          */
3426/* |Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
3427/* |Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |          */
3428/* |RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |          */
3429/* |PG Buffers       |   none   |   none   |   none   |   none   |          */
3430/* |TX Buffers       |   none   |   none   |   none   |   none   |          */
3431/* |Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |          */
3432/* |Context Memory   |          |          |          |          |          */
3433/* +-----------------+----------+----------+----------+----------+          */
3434/*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
3436/*                                                                          */
3437/* Returns:                                                                 */
3438/*   0 for success, positive value for failure.                             */
3439/****************************************************************************/
3440static int
3441bce_dma_alloc(device_t dev)
3442{
3443	struct bce_softc *sc;
3444	int i, error, rc = 0;
3445	bus_size_t max_size, max_seg_size;
3446	int max_segments;
3447
3448	sc = device_get_softc(dev);
3449
3450	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3451
3452	/*
3453	 * Allocate the parent bus DMA tag appropriate for PCI.
3454	 */
3455	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, BCE_DMA_BOUNDARY,
3456	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3457	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
3458	    &sc->parent_tag)) {
3459		BCE_PRINTF("%s(%d): Could not allocate parent DMA tag!\n",
3460		    __FILE__, __LINE__);
3461		rc = ENOMEM;
3462		goto bce_dma_alloc_exit;
3463	}
3464
3465	/*
3466	 * Create a DMA tag for the status block, allocate and clear the
3467	 * memory, map the memory into DMA space, and fetch the physical
3468	 * address of the block.
3469	 */
3470	if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3471	    BCE_DMA_BOUNDARY, sc->max_bus_addr,	BUS_SPACE_MAXADDR,
3472	    NULL, NULL,	BCE_STATUS_BLK_SZ, 1, BCE_STATUS_BLK_SZ,
3473	    0, NULL, NULL, &sc->status_tag)) {
3474		BCE_PRINTF("%s(%d): Could not allocate status block "
3475		    "DMA tag!\n", __FILE__, __LINE__);
3476		rc = ENOMEM;
3477		goto bce_dma_alloc_exit;
3478	}
3479
	if (bus_dmamem_alloc(sc->status_tag, (void **)&sc->status_block,
3481	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3482	    &sc->status_map)) {
3483		BCE_PRINTF("%s(%d): Could not allocate status block "
3484		    "DMA memory!\n", __FILE__, __LINE__);
3485		rc = ENOMEM;
3486		goto bce_dma_alloc_exit;
3487	}
3488
3489	error = bus_dmamap_load(sc->status_tag,	sc->status_map,
3490	    sc->status_block, BCE_STATUS_BLK_SZ, bce_dma_map_addr,
3491	    &sc->status_block_paddr, BUS_DMA_NOWAIT);
3492
3493	if (error || sc->status_block_paddr == 0) {
3494		BCE_PRINTF("%s(%d): Could not map status block "
3495		    "DMA memory!\n", __FILE__, __LINE__);
3496		rc = ENOMEM;
3497		goto bce_dma_alloc_exit;
3498	}
3499
3500	DBPRINT(sc, BCE_INFO_LOAD, "%s(): status_block_paddr = 0x%jX\n",
3501	    __FUNCTION__, (uintmax_t) sc->status_block_paddr);
3502
3503	/*
3504	 * Create a DMA tag for the statistics block, allocate and clear the
3505	 * memory, map the memory into DMA space, and fetch the physical
3506	 * address of the block.
3507	 */
3508	if (bus_dma_tag_create(sc->parent_tag, BCE_DMA_ALIGN,
3509	    BCE_DMA_BOUNDARY, sc->max_bus_addr,	BUS_SPACE_MAXADDR,
3510	    NULL, NULL,	BCE_STATS_BLK_SZ, 1, BCE_STATS_BLK_SZ,
3511	    0, NULL, NULL, &sc->stats_tag)) {
3512		BCE_PRINTF("%s(%d): Could not allocate statistics block "
3513		    "DMA tag!\n", __FILE__, __LINE__);
3514		rc = ENOMEM;
3515		goto bce_dma_alloc_exit;
3516	}
3517
3518	if (bus_dmamem_alloc(sc->stats_tag, (void **)&sc->stats_block,
3519	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->stats_map)) {
3520		BCE_PRINTF("%s(%d): Could not allocate statistics block "
3521		    "DMA memory!\n", __FILE__, __LINE__);
3522		rc = ENOMEM;
3523		goto bce_dma_alloc_exit;
3524	}
3525
3526	error = bus_dmamap_load(sc->stats_tag, sc->stats_map,
3527	    sc->stats_block, BCE_STATS_BLK_SZ, bce_dma_map_addr,
3528	    &sc->stats_block_paddr, BUS_DMA_NOWAIT);
3529
3530	if (error || sc->stats_block_paddr == 0) {
3531		BCE_PRINTF("%s(%d): Could not map statistics block "
3532		    "DMA memory!\n", __FILE__, __LINE__);
3533		rc = ENOMEM;
3534		goto bce_dma_alloc_exit;
3535	}
3536
3537	DBPRINT(sc, BCE_INFO_LOAD, "%s(): stats_block_paddr = 0x%jX\n",
3538	    __FUNCTION__, (uintmax_t) sc->stats_block_paddr);
3539
3540	/* BCM5709 uses host memory as cache for context memory. */
3541	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
3542		sc->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
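		/* Large pages can round the count to zero; use at least one. */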
3543		if (sc->ctx_pages == 0)
3544			sc->ctx_pages = 1;
3545
3546		DBRUNIF((sc->ctx_pages > 512),
3547		    BCE_PRINTF("%s(%d): Too many CTX pages! %d > 512\n",
3548		    __FILE__, __LINE__, sc->ctx_pages));
3549
3550		/*
3551		 * Create a DMA tag for the context pages,
3552		 * allocate and clear the memory, map the
3553		 * memory into DMA space, and fetch the
3554		 * physical address of the block.
3555		 */
		if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3557		    BCE_DMA_BOUNDARY, sc->max_bus_addr,	BUS_SPACE_MAXADDR,
3558		    NULL, NULL,	BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
3559		    0, NULL, NULL, &sc->ctx_tag)) {
3560			BCE_PRINTF("%s(%d): Could not allocate CTX "
3561			    "DMA tag!\n", __FILE__, __LINE__);
3562			rc = ENOMEM;
3563			goto bce_dma_alloc_exit;
3564		}
3565
3566		for (i = 0; i < sc->ctx_pages; i++) {
			if (bus_dmamem_alloc(sc->ctx_tag,
3568			    (void **)&sc->ctx_block[i],
3569			    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3570			    &sc->ctx_map[i])) {
3571				BCE_PRINTF("%s(%d): Could not allocate CTX "
3572				    "DMA memory!\n", __FILE__, __LINE__);
3573				rc = ENOMEM;
3574				goto bce_dma_alloc_exit;
3575			}
3576
3577			error = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
3578			    sc->ctx_block[i], BCM_PAGE_SIZE, bce_dma_map_addr,
3579			    &sc->ctx_paddr[i], BUS_DMA_NOWAIT);
3580
3581			if (error || sc->ctx_paddr[i] == 0) {
3582				BCE_PRINTF("%s(%d): Could not map CTX "
3583				    "DMA memory!\n", __FILE__, __LINE__);
3584				rc = ENOMEM;
3585				goto bce_dma_alloc_exit;
3586			}
3587
3588			DBPRINT(sc, BCE_INFO_LOAD, "%s(): ctx_paddr[%d] "
3589			    "= 0x%jX\n", __FUNCTION__, i,
3590			    (uintmax_t) sc->ctx_paddr[i]);
3591		}
3592	}
3593
3594	/*
3595	 * Create a DMA tag for the TX buffer descriptor chain,
3596	 * allocate and clear the  memory, and fetch the
3597	 * physical address of the block.
3598	 */
	if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, BCE_DMA_BOUNDARY,
3600	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3601	    BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ, 0,
3602	    NULL, NULL,	&sc->tx_bd_chain_tag)) {
3603		BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3604		    "chain DMA tag!\n", __FILE__, __LINE__);
3605		rc = ENOMEM;
3606		goto bce_dma_alloc_exit;
3607	}
3608
3609	for (i = 0; i < sc->tx_pages; i++) {
		if (bus_dmamem_alloc(sc->tx_bd_chain_tag,
3611		    (void **)&sc->tx_bd_chain[i],
3612		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3613		    &sc->tx_bd_chain_map[i])) {
3614			BCE_PRINTF("%s(%d): Could not allocate TX descriptor "
3615			    "chain DMA memory!\n", __FILE__, __LINE__);
3616			rc = ENOMEM;
3617			goto bce_dma_alloc_exit;
3618		}
3619
3620		error = bus_dmamap_load(sc->tx_bd_chain_tag,
3621		    sc->tx_bd_chain_map[i], sc->tx_bd_chain[i],
3622		    BCE_TX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3623		    &sc->tx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3624
3625		if (error || sc->tx_bd_chain_paddr[i] == 0) {
3626			BCE_PRINTF("%s(%d): Could not map TX descriptor "
3627			    "chain DMA memory!\n", __FILE__, __LINE__);
3628			rc = ENOMEM;
3629			goto bce_dma_alloc_exit;
3630		}
3631
3632		DBPRINT(sc, BCE_INFO_LOAD, "%s(): tx_bd_chain_paddr[%d] = "
3633		    "0x%jX\n", __FUNCTION__, i,
3634		    (uintmax_t) sc->tx_bd_chain_paddr[i]);
3635	}
3636
3637	/* Check the required size before mapping to conserve resources. */
3638	if (bce_tso_enable) {
3639		max_size     = BCE_TSO_MAX_SIZE;
3640		max_segments = BCE_MAX_SEGMENTS;
3641		max_seg_size = BCE_TSO_MAX_SEG_SIZE;
3642	} else {
3643		max_size     = MCLBYTES * BCE_MAX_SEGMENTS;
3644		max_segments = BCE_MAX_SEGMENTS;
3645		max_seg_size = MCLBYTES;
3646	}
3647
3648	/* Create a DMA tag for TX mbufs. */
3649	if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3650	    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, max_size,
3651	    max_segments, max_seg_size,	0, NULL, NULL, &sc->tx_mbuf_tag)) {
3652		BCE_PRINTF("%s(%d): Could not allocate TX mbuf DMA tag!\n",
3653		    __FILE__, __LINE__);
3654		rc = ENOMEM;
3655		goto bce_dma_alloc_exit;
3656	}
3657
3658	/* Create DMA maps for the TX mbufs clusters. */
3659	for (i = 0; i < TOTAL_TX_BD_ALLOC; i++) {
3660		if (bus_dmamap_create(sc->tx_mbuf_tag, BUS_DMA_NOWAIT,
3661			&sc->tx_mbuf_map[i])) {
3662			BCE_PRINTF("%s(%d): Unable to create TX mbuf DMA "
3663			    "map!\n", __FILE__, __LINE__);
3664			rc = ENOMEM;
3665			goto bce_dma_alloc_exit;
3666		}
3667	}
3668
3669	/*
3670	 * Create a DMA tag for the RX buffer descriptor chain,
3671	 * allocate and clear the memory, and fetch the physical
3672	 * address of the blocks.
3673	 */
3674	if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3675			BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR,
3676			sc->max_bus_addr, NULL, NULL,
3677			BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
3678			0, NULL, NULL, &sc->rx_bd_chain_tag)) {
3679		BCE_PRINTF("%s(%d): Could not allocate RX descriptor chain "
3680		    "DMA tag!\n", __FILE__, __LINE__);
3681		rc = ENOMEM;
3682		goto bce_dma_alloc_exit;
3683	}
3684
3685	for (i = 0; i < sc->rx_pages; i++) {
3686		if (bus_dmamem_alloc(sc->rx_bd_chain_tag,
3687		    (void **)&sc->rx_bd_chain[i],
3688		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3689		    &sc->rx_bd_chain_map[i])) {
3690			BCE_PRINTF("%s(%d): Could not allocate RX descriptor "
3691			    "chain DMA memory!\n", __FILE__, __LINE__);
3692			rc = ENOMEM;
3693			goto bce_dma_alloc_exit;
3694		}
3695
3696		error = bus_dmamap_load(sc->rx_bd_chain_tag,
3697		    sc->rx_bd_chain_map[i], sc->rx_bd_chain[i],
3698		    BCE_RX_CHAIN_PAGE_SZ, bce_dma_map_addr,
3699		    &sc->rx_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3700
3701		if (error || sc->rx_bd_chain_paddr[i] == 0) {
3702			BCE_PRINTF("%s(%d): Could not map RX descriptor "
3703			    "chain DMA memory!\n", __FILE__, __LINE__);
3704			rc = ENOMEM;
3705			goto bce_dma_alloc_exit;
3706		}
3707
3708		DBPRINT(sc, BCE_INFO_LOAD, "%s(): rx_bd_chain_paddr[%d] = "
3709		    "0x%jX\n", __FUNCTION__, i,
3710		    (uintmax_t) sc->rx_bd_chain_paddr[i]);
3711	}
3712
3713	/*
3714	 * Create a DMA tag for RX mbufs.
3715	 */
3716	if (bce_hdr_split == TRUE)
3717		max_size = ((sc->rx_bd_mbuf_alloc_size < MCLBYTES) ?
3718		    MCLBYTES : sc->rx_bd_mbuf_alloc_size);
3719	else
3720		max_size = MJUM9BYTES;
3721
3722	DBPRINT(sc, BCE_INFO_LOAD, "%s(): Creating rx_mbuf_tag "
3723	    "(max size = 0x%jX)\n", __FUNCTION__, (uintmax_t)max_size);
3724
3725	if (bus_dma_tag_create(sc->parent_tag, BCE_RX_BUF_ALIGN,
3726	    BCE_DMA_BOUNDARY, sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL,
3727	    max_size, 1, max_size, 0, NULL, NULL, &sc->rx_mbuf_tag)) {
3728		BCE_PRINTF("%s(%d): Could not allocate RX mbuf DMA tag!\n",
3729		    __FILE__, __LINE__);
3730		rc = ENOMEM;
3731		goto bce_dma_alloc_exit;
3732	}
3733
3734	/* Create DMA maps for the RX mbuf clusters. */
3735	for (i = 0; i < TOTAL_RX_BD_ALLOC; i++) {
3736		if (bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_NOWAIT,
3737		    &sc->rx_mbuf_map[i])) {
3738			BCE_PRINTF("%s(%d): Unable to create RX mbuf "
3739			    "DMA map!\n", __FILE__, __LINE__);
3740			rc = ENOMEM;
3741			goto bce_dma_alloc_exit;
3742		}
3743	}
3744
3745	if (bce_hdr_split == TRUE) {
3746		/*
3747		 * Create a DMA tag for the page buffer descriptor chain,
3748		 * allocate and clear the memory, and fetch the physical
3749		 * address of the blocks.
3750		 */
3751		if (bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE,
3752			    BCE_DMA_BOUNDARY, BUS_SPACE_MAXADDR, sc->max_bus_addr,
3753			    NULL, NULL,	BCE_PG_CHAIN_PAGE_SZ, 1, BCE_PG_CHAIN_PAGE_SZ,
3754			    0, NULL, NULL, &sc->pg_bd_chain_tag)) {
3755			BCE_PRINTF("%s(%d): Could not allocate page descriptor "
3756			    "chain DMA tag!\n",	__FILE__, __LINE__);
3757			rc = ENOMEM;
3758			goto bce_dma_alloc_exit;
3759		}
3760
3761		for (i = 0; i < sc->pg_pages; i++) {
3762			if (bus_dmamem_alloc(sc->pg_bd_chain_tag,
3763			    (void **)&sc->pg_bd_chain[i],
3764			    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
3765			    &sc->pg_bd_chain_map[i])) {
3766				BCE_PRINTF("%s(%d): Could not allocate page "
3767				    "descriptor chain DMA memory!\n",
3768				    __FILE__, __LINE__);
3769				rc = ENOMEM;
3770				goto bce_dma_alloc_exit;
3771			}
3772
3773			error = bus_dmamap_load(sc->pg_bd_chain_tag,
3774			    sc->pg_bd_chain_map[i], sc->pg_bd_chain[i],
3775			    BCE_PG_CHAIN_PAGE_SZ, bce_dma_map_addr,
3776			    &sc->pg_bd_chain_paddr[i], BUS_DMA_NOWAIT);
3777
3778			if (error || sc->pg_bd_chain_paddr[i] == 0) {
3779				BCE_PRINTF("%s(%d): Could not map page descriptor "
3780					"chain DMA memory!\n", __FILE__, __LINE__);
3781				rc = ENOMEM;
3782				goto bce_dma_alloc_exit;
3783			}
3784
3785			DBPRINT(sc, BCE_INFO_LOAD, "%s(): pg_bd_chain_paddr[%d] = "
3786				"0x%jX\n", __FUNCTION__, i,
3787				(uintmax_t) sc->pg_bd_chain_paddr[i]);
3788		}
3789
3790		/*
3791		 * Create a DMA tag for page mbufs.
3792		 */
3793		if (bus_dma_tag_create(sc->parent_tag, 1, BCE_DMA_BOUNDARY,
3794		    sc->max_bus_addr, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
3795		    1, MCLBYTES, 0, NULL, NULL, &sc->pg_mbuf_tag)) {
3796			BCE_PRINTF("%s(%d): Could not allocate page mbuf "
3797				"DMA tag!\n", __FILE__, __LINE__);
3798			rc = ENOMEM;
3799			goto bce_dma_alloc_exit;
3800		}
3801
3802		/* Create DMA maps for the page mbuf clusters. */
3803		for (i = 0; i < TOTAL_PG_BD_ALLOC; i++) {
3804			if (bus_dmamap_create(sc->pg_mbuf_tag, BUS_DMA_NOWAIT,
3805				&sc->pg_mbuf_map[i])) {
3806				BCE_PRINTF("%s(%d): Unable to create page mbuf "
3807					"DMA map!\n", __FILE__, __LINE__);
3808				rc = ENOMEM;
3809				goto bce_dma_alloc_exit;
3810			}
3811		}
3812	}
3813
3814bce_dma_alloc_exit:
3815	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
3816	return(rc);
3817}
3818
3819/****************************************************************************/
3820/* Release all resources used by the driver.                                */
3821/*                                                                          */
3822/* Releases all resources acquired by the driver including interrupts,      */
3823/* interrupt handler, interfaces, mutexes, and DMA memory.                  */
3824/*                                                                          */
3825/* Returns:                                                                 */
3826/*   Nothing.                                                               */
3827/****************************************************************************/
3828static void
3829bce_release_resources(struct bce_softc *sc)
3830{
3831	device_t dev;
3832
3833	DBENTER(BCE_VERBOSE_RESET);
3834
3835	dev = sc->bce_dev;
3836
3837	bce_dma_free(sc);
3838
3839	if (sc->bce_intrhand != NULL) {
3840		DBPRINT(sc, BCE_INFO_RESET, "Removing interrupt handler.\n");
3841		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
3842	}
3843
3844	if (sc->bce_res_irq != NULL) {
3845		DBPRINT(sc, BCE_INFO_RESET, "Releasing IRQ.\n");
3846		bus_release_resource(dev, SYS_RES_IRQ,
3847		    rman_get_rid(sc->bce_res_irq), sc->bce_res_irq);
3848	}
3849
3850	if (sc->bce_flags & (BCE_USING_MSI_FLAG | BCE_USING_MSIX_FLAG)) {
3851		DBPRINT(sc, BCE_INFO_RESET, "Releasing MSI/MSI-X vector.\n");
3852		pci_release_msi(dev);
3853	}
3854
3855	if (sc->bce_res_mem != NULL) {
3856		DBPRINT(sc, BCE_INFO_RESET, "Releasing PCI memory.\n");
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
3858		    sc->bce_res_mem);
3859	}
3860
3861	if (sc->bce_ifp != NULL) {
3862		DBPRINT(sc, BCE_INFO_RESET, "Releasing IF.\n");
3863		if_free(sc->bce_ifp);
3864	}
3865
3866	if (mtx_initialized(&sc->bce_mtx))
3867		BCE_LOCK_DESTROY(sc);
3868
3869	DBEXIT(BCE_VERBOSE_RESET);
3870}
3871
3872/****************************************************************************/
3873/* Firmware synchronization.                                                */
3874/*                                                                          */
3875/* Before performing certain events such as a chip reset, synchronize with  */
3876/* the firmware first.                                                      */
3877/*                                                                          */
3878/* Returns:                                                                 */
3879/*   0 for success, positive value for failure.                             */
3880/****************************************************************************/
3881static int
3882bce_fw_sync(struct bce_softc *sc, u32 msg_data)
3883{
3884	int i, rc = 0;
3885	u32 val;
3886
3887	DBENTER(BCE_VERBOSE_RESET);
3888
3889	/* Don't waste any time if we've timed out before. */
3890	if (sc->bce_fw_timed_out == TRUE) {
3891		rc = EBUSY;
3892		goto bce_fw_sync_exit;
3893	}
3894
3895	/* Increment the message sequence number. */
3896	sc->bce_fw_wr_seq++;
3897	msg_data |= sc->bce_fw_wr_seq;
3898
	DBPRINT(sc, BCE_VERBOSE_FIRMWARE, "bce_fw_sync(): msg_data = "
	    "0x%08X\n", msg_data);
3901
3902	/* Send the message to the bootcode driver mailbox. */
3903	bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3904
3905	/* Wait for the bootcode to acknowledge the message. */
3906	for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
3907		/* Check for a response in the bootcode firmware mailbox. */
3908		val = bce_shmem_rd(sc, BCE_FW_MB);
3909		if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
3910			break;
3911		DELAY(1000);
3912	}
3913
3914	/* If we've timed out, tell bootcode that we've stopped waiting. */
3915	if (((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ)) &&
3916	    ((msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0)) {
3917		BCE_PRINTF("%s(%d): Firmware synchronization timeout! "
3918		    "msg_data = 0x%08X\n", __FILE__, __LINE__, msg_data);
3919
3920		msg_data &= ~BCE_DRV_MSG_CODE;
3921		msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
3922
3923		bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
3924
3925		sc->bce_fw_timed_out = TRUE;
3926		rc = EBUSY;
3927	}
3928
3929bce_fw_sync_exit:
3930	DBEXIT(BCE_VERBOSE_RESET);
3931	return (rc);
3932}
3933
3934/****************************************************************************/
3935/* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
3936/*                                                                          */
3937/* Returns:                                                                 */
3938/*   Nothing.                                                               */
3939/****************************************************************************/
3940static void
3941bce_load_rv2p_fw(struct bce_softc *sc, const u32 *rv2p_code,
3942	u32 rv2p_code_len, u32 rv2p_proc)
3943{
3944	int i;
3945	u32 val;
3946
3947	DBENTER(BCE_VERBOSE_RESET);
3948
3949	/* Set the page size used by RV2P. */
3950	if (rv2p_proc == RV2P_PROC2) {
3951		BCE_RV2P_PROC2_CHG_MAX_BD_PAGE(USABLE_RX_BD_PER_PAGE);
3952	}
3953
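	/* Each 64-bit RV2P instruction is written as two 32-bit words. */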
3954	for (i = 0; i < rv2p_code_len; i += 8) {
3955		REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
3956		rv2p_code++;
3957		REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
3958		rv2p_code++;
3959
3960		if (rv2p_proc == RV2P_PROC1) {
3961			val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
3962			REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
3963		}
3964		else {
3965			val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
3966			REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
3967		}
3968	}
3969
3970	/* Reset the processor, un-stall is done later. */
3971	if (rv2p_proc == RV2P_PROC1) {
3972		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
3973	}
3974	else {
3975		REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
3976	}
3977
3978	DBEXIT(BCE_VERBOSE_RESET);
3979}
3980
3981/****************************************************************************/
3982/* Load RISC processor firmware.                                            */
3983/*                                                                          */
3984/* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
3985/* associated with a particular processor.                                  */
3986/*                                                                          */
3987/* Returns:                                                                 */
3988/*   Nothing.                                                               */
3989/****************************************************************************/
3990static void
3991bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
3992	struct fw_info *fw)
3993{
3994	u32 offset;
3995
3996	DBENTER(BCE_VERBOSE_RESET);
3997
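	/* Halt the CPU before overwriting its scratchpad memory. */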
	bce_halt_cpu(sc, cpu_reg);
3999
4000	/* Load the Text area. */
4001	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
4002	if (fw->text) {
4003		int j;
4004
4005		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
4006			REG_WR_IND(sc, offset, fw->text[j]);
4007	        }
4008	}
4009
4010	/* Load the Data area. */
4011	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
4012	if (fw->data) {
4013		int j;
4014
4015		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
4016			REG_WR_IND(sc, offset, fw->data[j]);
4017		}
4018	}
4019
4020	/* Load the SBSS area. */
4021	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
4022	if (fw->sbss) {
4023		int j;
4024
4025		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
4026			REG_WR_IND(sc, offset, fw->sbss[j]);
4027		}
4028	}
4029
4030	/* Load the BSS area. */
4031	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
4032	if (fw->bss) {
4033		int j;
4034
		for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
4036			REG_WR_IND(sc, offset, fw->bss[j]);
4037		}
4038	}
4039
4040	/* Load the Read-Only area. */
4041	offset = cpu_reg->spad_base +
4042		(fw->rodata_addr - cpu_reg->mips_view_base);
4043	if (fw->rodata) {
4044		int j;
4045
4046		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
4047			REG_WR_IND(sc, offset, fw->rodata[j]);
4048		}
4049	}
4050
4051	/* Clear the pre-fetch instruction and set the FW start address. */
4052	REG_WR_IND(sc, cpu_reg->inst, 0);
4053	REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
4054
4055	DBEXIT(BCE_VERBOSE_RESET);
4056}
4057
4058/****************************************************************************/
4059/* Starts the RISC processor.                                               */
4060/*                                                                          */
4061/* Assumes the CPU starting address has already been set.                   */
4062/*                                                                          */
4063/* Returns:                                                                 */
4064/*   Nothing.                                                               */
4065/****************************************************************************/
4066static void
4067bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
4068{
4069	u32 val;
4070
4071	DBENTER(BCE_VERBOSE_RESET);
4072
4073	/* Start the CPU. */
4074	val = REG_RD_IND(sc, cpu_reg->mode);
4075	val &= ~cpu_reg->mode_value_halt;
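	/* Clear the CPU state bits before releasing the halt. */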
4076	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
4077	REG_WR_IND(sc, cpu_reg->mode, val);
4078
4079	DBEXIT(BCE_VERBOSE_RESET);
4080}
4081
4082/****************************************************************************/
4083/* Halts the RISC processor.                                                */
4084/*                                                                          */
4085/* Returns:                                                                 */
4086/*   Nothing.                                                               */
4087/****************************************************************************/
4088static void
4089bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
4090{
4091	u32 val;
4092
4093	DBENTER(BCE_VERBOSE_RESET);
4094
4095	/* Halt the CPU. */
4096	val = REG_RD_IND(sc, cpu_reg->mode);
4097	val |= cpu_reg->mode_value_halt;
4098	REG_WR_IND(sc, cpu_reg->mode, val);
4099	REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
4100
4101	DBEXIT(BCE_VERBOSE_RESET);
4102}
4103
4104/****************************************************************************/
/* Starts the RX CPU.                                                       */
4106/*                                                                          */
4107/* Returns:                                                                 */
4108/*   Nothing.                                                               */
4109/****************************************************************************/
4110static void
4111bce_start_rxp_cpu(struct bce_softc *sc)
4112{
4113	struct cpu_reg cpu_reg;
4114
4115	DBENTER(BCE_VERBOSE_RESET);
4116
4117	cpu_reg.mode = BCE_RXP_CPU_MODE;
4118	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
4119	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
4120	cpu_reg.state = BCE_RXP_CPU_STATE;
4121	cpu_reg.state_value_clear = 0xffffff;
4122	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
4123	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
4124	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
4125	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
4126	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
4127	cpu_reg.spad_base = BCE_RXP_SCRATCH;
4128	cpu_reg.mips_view_base = 0x8000000;
4129
4130	DBPRINT(sc, BCE_INFO_RESET, "Starting RX firmware.\n");
4131	bce_start_cpu(sc, &cpu_reg);
4132
4133	DBEXIT(BCE_VERBOSE_RESET);
4134}
4135
4136/****************************************************************************/
4137/* Initialize the RX CPU.                                                   */
4138/*                                                                          */
4139/* Returns:                                                                 */
4140/*   Nothing.                                                               */
4141/****************************************************************************/
4142static void
4143bce_init_rxp_cpu(struct bce_softc *sc)
4144{
4145	struct cpu_reg cpu_reg;
4146	struct fw_info fw;
4147
4148	DBENTER(BCE_VERBOSE_RESET);
4149
4150	cpu_reg.mode = BCE_RXP_CPU_MODE;
4151	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
4152	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
4153	cpu_reg.state = BCE_RXP_CPU_STATE;
4154	cpu_reg.state_value_clear = 0xffffff;
4155	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
4156	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
4157	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
4158	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
4159	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
4160	cpu_reg.spad_base = BCE_RXP_SCRATCH;
4161	cpu_reg.mips_view_base = 0x8000000;
4162
4163	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		fw.ver_major = bce_RXP_b09FwReleaseMajor;
4165		fw.ver_minor = bce_RXP_b09FwReleaseMinor;
4166		fw.ver_fix = bce_RXP_b09FwReleaseFix;
4167		fw.start_addr = bce_RXP_b09FwStartAddr;
4168
4169		fw.text_addr = bce_RXP_b09FwTextAddr;
4170		fw.text_len = bce_RXP_b09FwTextLen;
4171		fw.text_index = 0;
4172		fw.text = bce_RXP_b09FwText;
4173
4174		fw.data_addr = bce_RXP_b09FwDataAddr;
4175		fw.data_len = bce_RXP_b09FwDataLen;
4176		fw.data_index = 0;
4177		fw.data = bce_RXP_b09FwData;
4178
4179		fw.sbss_addr = bce_RXP_b09FwSbssAddr;
4180		fw.sbss_len = bce_RXP_b09FwSbssLen;
4181		fw.sbss_index = 0;
4182		fw.sbss = bce_RXP_b09FwSbss;
4183
4184		fw.bss_addr = bce_RXP_b09FwBssAddr;
4185		fw.bss_len = bce_RXP_b09FwBssLen;
4186		fw.bss_index = 0;
4187		fw.bss = bce_RXP_b09FwBss;
4188
4189		fw.rodata_addr = bce_RXP_b09FwRodataAddr;
4190		fw.rodata_len = bce_RXP_b09FwRodataLen;
4191		fw.rodata_index = 0;
4192		fw.rodata = bce_RXP_b09FwRodata;
4193	} else {
4194		fw.ver_major = bce_RXP_b06FwReleaseMajor;
4195		fw.ver_minor = bce_RXP_b06FwReleaseMinor;
4196		fw.ver_fix = bce_RXP_b06FwReleaseFix;
4197		fw.start_addr = bce_RXP_b06FwStartAddr;
4198
4199		fw.text_addr = bce_RXP_b06FwTextAddr;
4200		fw.text_len = bce_RXP_b06FwTextLen;
4201		fw.text_index = 0;
4202		fw.text = bce_RXP_b06FwText;
4203
4204		fw.data_addr = bce_RXP_b06FwDataAddr;
4205		fw.data_len = bce_RXP_b06FwDataLen;
4206		fw.data_index = 0;
4207		fw.data = bce_RXP_b06FwData;
4208
4209		fw.sbss_addr = bce_RXP_b06FwSbssAddr;
4210		fw.sbss_len = bce_RXP_b06FwSbssLen;
4211		fw.sbss_index = 0;
4212		fw.sbss = bce_RXP_b06FwSbss;
4213
4214		fw.bss_addr = bce_RXP_b06FwBssAddr;
4215		fw.bss_len = bce_RXP_b06FwBssLen;
4216		fw.bss_index = 0;
4217		fw.bss = bce_RXP_b06FwBss;
4218
4219		fw.rodata_addr = bce_RXP_b06FwRodataAddr;
4220		fw.rodata_len = bce_RXP_b06FwRodataLen;
4221		fw.rodata_index = 0;
4222		fw.rodata = bce_RXP_b06FwRodata;
4223	}
4224
4225	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
4226	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4227
	/* Delay RXP start until initialization is complete. */
4229
4230	DBEXIT(BCE_VERBOSE_RESET);
4231}
4232
4233/****************************************************************************/
4234/* Initialize the TX CPU.                                                   */
4235/*                                                                          */
4236/* Returns:                                                                 */
4237/*   Nothing.                                                               */
4238/****************************************************************************/
4239static void
4240bce_init_txp_cpu(struct bce_softc *sc)
4241{
4242	struct cpu_reg cpu_reg;
4243	struct fw_info fw;
4244
4245	DBENTER(BCE_VERBOSE_RESET);
4246
4247	cpu_reg.mode = BCE_TXP_CPU_MODE;
4248	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
4249	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
4250	cpu_reg.state = BCE_TXP_CPU_STATE;
4251	cpu_reg.state_value_clear = 0xffffff;
4252	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
4253	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
4254	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
4255	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
4256	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
4257	cpu_reg.spad_base = BCE_TXP_SCRATCH;
4258	cpu_reg.mips_view_base = 0x8000000;
4259
4260	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4261		fw.ver_major = bce_TXP_b09FwReleaseMajor;
4262		fw.ver_minor = bce_TXP_b09FwReleaseMinor;
4263		fw.ver_fix = bce_TXP_b09FwReleaseFix;
4264		fw.start_addr = bce_TXP_b09FwStartAddr;
4265
4266		fw.text_addr = bce_TXP_b09FwTextAddr;
4267		fw.text_len = bce_TXP_b09FwTextLen;
4268		fw.text_index = 0;
4269		fw.text = bce_TXP_b09FwText;
4270
4271		fw.data_addr = bce_TXP_b09FwDataAddr;
4272		fw.data_len = bce_TXP_b09FwDataLen;
4273		fw.data_index = 0;
4274		fw.data = bce_TXP_b09FwData;
4275
4276		fw.sbss_addr = bce_TXP_b09FwSbssAddr;
4277		fw.sbss_len = bce_TXP_b09FwSbssLen;
4278		fw.sbss_index = 0;
4279		fw.sbss = bce_TXP_b09FwSbss;
4280
4281		fw.bss_addr = bce_TXP_b09FwBssAddr;
4282		fw.bss_len = bce_TXP_b09FwBssLen;
4283		fw.bss_index = 0;
4284		fw.bss = bce_TXP_b09FwBss;
4285
4286		fw.rodata_addr = bce_TXP_b09FwRodataAddr;
4287		fw.rodata_len = bce_TXP_b09FwRodataLen;
4288		fw.rodata_index = 0;
4289		fw.rodata = bce_TXP_b09FwRodata;
4290	} else {
4291		fw.ver_major = bce_TXP_b06FwReleaseMajor;
4292		fw.ver_minor = bce_TXP_b06FwReleaseMinor;
4293		fw.ver_fix = bce_TXP_b06FwReleaseFix;
4294		fw.start_addr = bce_TXP_b06FwStartAddr;
4295
4296		fw.text_addr = bce_TXP_b06FwTextAddr;
4297		fw.text_len = bce_TXP_b06FwTextLen;
4298		fw.text_index = 0;
4299		fw.text = bce_TXP_b06FwText;
4300
4301		fw.data_addr = bce_TXP_b06FwDataAddr;
4302		fw.data_len = bce_TXP_b06FwDataLen;
4303		fw.data_index = 0;
4304		fw.data = bce_TXP_b06FwData;
4305
4306		fw.sbss_addr = bce_TXP_b06FwSbssAddr;
4307		fw.sbss_len = bce_TXP_b06FwSbssLen;
4308		fw.sbss_index = 0;
4309		fw.sbss = bce_TXP_b06FwSbss;
4310
4311		fw.bss_addr = bce_TXP_b06FwBssAddr;
4312		fw.bss_len = bce_TXP_b06FwBssLen;
4313		fw.bss_index = 0;
4314		fw.bss = bce_TXP_b06FwBss;
4315
4316		fw.rodata_addr = bce_TXP_b06FwRodataAddr;
4317		fw.rodata_len = bce_TXP_b06FwRodataLen;
4318		fw.rodata_index = 0;
4319		fw.rodata = bce_TXP_b06FwRodata;
4320	}
4321
4322	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
4323	bce_load_cpu_fw(sc, &cpu_reg, &fw);
	bce_start_cpu(sc, &cpu_reg);
4325
4326	DBEXIT(BCE_VERBOSE_RESET);
4327}
4328
4329/****************************************************************************/
4330/* Initialize the TPAT CPU.                                                 */
4331/*                                                                          */
4332/* Returns:                                                                 */
4333/*   Nothing.                                                               */
4334/****************************************************************************/
4335static void
4336bce_init_tpat_cpu(struct bce_softc *sc)
4337{
4338	struct cpu_reg cpu_reg;
4339	struct fw_info fw;
4340
4341	DBENTER(BCE_VERBOSE_RESET);
4342
4343	cpu_reg.mode = BCE_TPAT_CPU_MODE;
4344	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
4345	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
4346	cpu_reg.state = BCE_TPAT_CPU_STATE;
4347	cpu_reg.state_value_clear = 0xffffff;
4348	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
4349	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
4350	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
4351	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
4352	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
4353	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
4354	cpu_reg.mips_view_base = 0x8000000;
4355
4356	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4357		fw.ver_major = bce_TPAT_b09FwReleaseMajor;
4358		fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
4359		fw.ver_fix = bce_TPAT_b09FwReleaseFix;
4360		fw.start_addr = bce_TPAT_b09FwStartAddr;
4361
4362		fw.text_addr = bce_TPAT_b09FwTextAddr;
4363		fw.text_len = bce_TPAT_b09FwTextLen;
4364		fw.text_index = 0;
4365		fw.text = bce_TPAT_b09FwText;
4366
4367		fw.data_addr = bce_TPAT_b09FwDataAddr;
4368		fw.data_len = bce_TPAT_b09FwDataLen;
4369		fw.data_index = 0;
4370		fw.data = bce_TPAT_b09FwData;
4371
4372		fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
4373		fw.sbss_len = bce_TPAT_b09FwSbssLen;
4374		fw.sbss_index = 0;
4375		fw.sbss = bce_TPAT_b09FwSbss;
4376
4377		fw.bss_addr = bce_TPAT_b09FwBssAddr;
4378		fw.bss_len = bce_TPAT_b09FwBssLen;
4379		fw.bss_index = 0;
4380		fw.bss = bce_TPAT_b09FwBss;
4381
4382		fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
4383		fw.rodata_len = bce_TPAT_b09FwRodataLen;
4384		fw.rodata_index = 0;
4385		fw.rodata = bce_TPAT_b09FwRodata;
4386	} else {
4387		fw.ver_major = bce_TPAT_b06FwReleaseMajor;
4388		fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
4389		fw.ver_fix = bce_TPAT_b06FwReleaseFix;
4390		fw.start_addr = bce_TPAT_b06FwStartAddr;
4391
4392		fw.text_addr = bce_TPAT_b06FwTextAddr;
4393		fw.text_len = bce_TPAT_b06FwTextLen;
4394		fw.text_index = 0;
4395		fw.text = bce_TPAT_b06FwText;
4396
4397		fw.data_addr = bce_TPAT_b06FwDataAddr;
4398		fw.data_len = bce_TPAT_b06FwDataLen;
4399		fw.data_index = 0;
4400		fw.data = bce_TPAT_b06FwData;
4401
4402		fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
4403		fw.sbss_len = bce_TPAT_b06FwSbssLen;
4404		fw.sbss_index = 0;
4405		fw.sbss = bce_TPAT_b06FwSbss;
4406
4407		fw.bss_addr = bce_TPAT_b06FwBssAddr;
4408		fw.bss_len = bce_TPAT_b06FwBssLen;
4409		fw.bss_index = 0;
4410		fw.bss = bce_TPAT_b06FwBss;
4411
4412		fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
4413		fw.rodata_len = bce_TPAT_b06FwRodataLen;
4414		fw.rodata_index = 0;
4415		fw.rodata = bce_TPAT_b06FwRodata;
4416	}
4417
4418	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
4419	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4420	bce_start_cpu(sc, &cpu_reg);
4421
4422	DBEXIT(BCE_VERBOSE_RESET);
4423}
4424
4425/****************************************************************************/
4426/* Initialize the CP CPU.                                                   */
4427/*                                                                          */
4428/* Returns:                                                                 */
4429/*   Nothing.                                                               */
4430/****************************************************************************/
4431static void
4432bce_init_cp_cpu(struct bce_softc *sc)
4433{
4434	struct cpu_reg cpu_reg;
4435	struct fw_info fw;
4436
4437	DBENTER(BCE_VERBOSE_RESET);
4438
4439	cpu_reg.mode = BCE_CP_CPU_MODE;
4440	cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
4441	cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
4442	cpu_reg.state = BCE_CP_CPU_STATE;
4443	cpu_reg.state_value_clear = 0xffffff;
4444	cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
4445	cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
4446	cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
4447	cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
4448	cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
4449	cpu_reg.spad_base = BCE_CP_SCRATCH;
4450	cpu_reg.mips_view_base = 0x8000000;
4451
4452	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4453		fw.ver_major = bce_CP_b09FwReleaseMajor;
4454		fw.ver_minor = bce_CP_b09FwReleaseMinor;
4455		fw.ver_fix = bce_CP_b09FwReleaseFix;
4456		fw.start_addr = bce_CP_b09FwStartAddr;
4457
4458		fw.text_addr = bce_CP_b09FwTextAddr;
4459		fw.text_len = bce_CP_b09FwTextLen;
4460		fw.text_index = 0;
4461		fw.text = bce_CP_b09FwText;
4462
4463		fw.data_addr = bce_CP_b09FwDataAddr;
4464		fw.data_len = bce_CP_b09FwDataLen;
4465		fw.data_index = 0;
4466		fw.data = bce_CP_b09FwData;
4467
4468		fw.sbss_addr = bce_CP_b09FwSbssAddr;
4469		fw.sbss_len = bce_CP_b09FwSbssLen;
4470		fw.sbss_index = 0;
4471		fw.sbss = bce_CP_b09FwSbss;
4472
4473		fw.bss_addr = bce_CP_b09FwBssAddr;
4474		fw.bss_len = bce_CP_b09FwBssLen;
4475		fw.bss_index = 0;
4476		fw.bss = bce_CP_b09FwBss;
4477
4478		fw.rodata_addr = bce_CP_b09FwRodataAddr;
4479		fw.rodata_len = bce_CP_b09FwRodataLen;
4480		fw.rodata_index = 0;
4481		fw.rodata = bce_CP_b09FwRodata;
4482	} else {
4483		fw.ver_major = bce_CP_b06FwReleaseMajor;
4484		fw.ver_minor = bce_CP_b06FwReleaseMinor;
4485		fw.ver_fix = bce_CP_b06FwReleaseFix;
4486		fw.start_addr = bce_CP_b06FwStartAddr;
4487
4488		fw.text_addr = bce_CP_b06FwTextAddr;
4489		fw.text_len = bce_CP_b06FwTextLen;
4490		fw.text_index = 0;
4491		fw.text = bce_CP_b06FwText;
4492
4493		fw.data_addr = bce_CP_b06FwDataAddr;
4494		fw.data_len = bce_CP_b06FwDataLen;
4495		fw.data_index = 0;
4496		fw.data = bce_CP_b06FwData;
4497
4498		fw.sbss_addr = bce_CP_b06FwSbssAddr;
4499		fw.sbss_len = bce_CP_b06FwSbssLen;
4500		fw.sbss_index = 0;
4501		fw.sbss = bce_CP_b06FwSbss;
4502
4503		fw.bss_addr = bce_CP_b06FwBssAddr;
4504		fw.bss_len = bce_CP_b06FwBssLen;
4505		fw.bss_index = 0;
4506		fw.bss = bce_CP_b06FwBss;
4507
4508		fw.rodata_addr = bce_CP_b06FwRodataAddr;
4509		fw.rodata_len = bce_CP_b06FwRodataLen;
4510		fw.rodata_index = 0;
4511		fw.rodata = bce_CP_b06FwRodata;
4512	}
4513
4514	DBPRINT(sc, BCE_INFO_RESET, "Loading CP firmware.\n");
4515	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4516	bce_start_cpu(sc, &cpu_reg);
4517
4518	DBEXIT(BCE_VERBOSE_RESET);
4519}
4520
4521/****************************************************************************/
/* Initialize the COM CPU.                                                  */
4523/*                                                                          */
4524/* Returns:                                                                 */
4525/*   Nothing.                                                               */
4526/****************************************************************************/
4527static void
4528bce_init_com_cpu(struct bce_softc *sc)
4529{
4530	struct cpu_reg cpu_reg;
4531	struct fw_info fw;
4532
4533	DBENTER(BCE_VERBOSE_RESET);
4534
4535	cpu_reg.mode = BCE_COM_CPU_MODE;
4536	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
4537	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
4538	cpu_reg.state = BCE_COM_CPU_STATE;
4539	cpu_reg.state_value_clear = 0xffffff;
4540	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
4541	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
4542	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
4543	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
4544	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
4545	cpu_reg.spad_base = BCE_COM_SCRATCH;
4546	cpu_reg.mips_view_base = 0x8000000;
4547
4548	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4549		fw.ver_major = bce_COM_b09FwReleaseMajor;
4550		fw.ver_minor = bce_COM_b09FwReleaseMinor;
4551		fw.ver_fix = bce_COM_b09FwReleaseFix;
4552		fw.start_addr = bce_COM_b09FwStartAddr;
4553
4554		fw.text_addr = bce_COM_b09FwTextAddr;
4555		fw.text_len = bce_COM_b09FwTextLen;
4556		fw.text_index = 0;
4557		fw.text = bce_COM_b09FwText;
4558
4559		fw.data_addr = bce_COM_b09FwDataAddr;
4560		fw.data_len = bce_COM_b09FwDataLen;
4561		fw.data_index = 0;
4562		fw.data = bce_COM_b09FwData;
4563
4564		fw.sbss_addr = bce_COM_b09FwSbssAddr;
4565		fw.sbss_len = bce_COM_b09FwSbssLen;
4566		fw.sbss_index = 0;
4567		fw.sbss = bce_COM_b09FwSbss;
4568
4569		fw.bss_addr = bce_COM_b09FwBssAddr;
4570		fw.bss_len = bce_COM_b09FwBssLen;
4571		fw.bss_index = 0;
4572		fw.bss = bce_COM_b09FwBss;
4573
4574		fw.rodata_addr = bce_COM_b09FwRodataAddr;
4575		fw.rodata_len = bce_COM_b09FwRodataLen;
4576		fw.rodata_index = 0;
4577		fw.rodata = bce_COM_b09FwRodata;
4578	} else {
4579		fw.ver_major = bce_COM_b06FwReleaseMajor;
4580		fw.ver_minor = bce_COM_b06FwReleaseMinor;
4581		fw.ver_fix = bce_COM_b06FwReleaseFix;
4582		fw.start_addr = bce_COM_b06FwStartAddr;
4583
4584		fw.text_addr = bce_COM_b06FwTextAddr;
4585		fw.text_len = bce_COM_b06FwTextLen;
4586		fw.text_index = 0;
4587		fw.text = bce_COM_b06FwText;
4588
4589		fw.data_addr = bce_COM_b06FwDataAddr;
4590		fw.data_len = bce_COM_b06FwDataLen;
4591		fw.data_index = 0;
4592		fw.data = bce_COM_b06FwData;
4593
4594		fw.sbss_addr = bce_COM_b06FwSbssAddr;
4595		fw.sbss_len = bce_COM_b06FwSbssLen;
4596		fw.sbss_index = 0;
4597		fw.sbss = bce_COM_b06FwSbss;
4598
4599		fw.bss_addr = bce_COM_b06FwBssAddr;
4600		fw.bss_len = bce_COM_b06FwBssLen;
4601		fw.bss_index = 0;
4602		fw.bss = bce_COM_b06FwBss;
4603
4604		fw.rodata_addr = bce_COM_b06FwRodataAddr;
4605		fw.rodata_len = bce_COM_b06FwRodataLen;
4606		fw.rodata_index = 0;
4607		fw.rodata = bce_COM_b06FwRodata;
4608	}
4609
4610	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
4611	bce_load_cpu_fw(sc, &cpu_reg, &fw);
4612	bce_start_cpu(sc, &cpu_reg);
4613
4614	DBEXIT(BCE_VERBOSE_RESET);
4615}
4616
4617/****************************************************************************/
4618/* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
4619/*                                                                          */
4620/* Loads the firmware for each CPU and starts the CPU.                      */
4621/*                                                                          */
4622/* Returns:                                                                 */
4623/*   Nothing.                                                               */
4624/****************************************************************************/
4625static void
4626bce_init_cpus(struct bce_softc *sc)
4627{
4628	DBENTER(BCE_VERBOSE_RESET);
4629
4630	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4631		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
4632			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
4633			    sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
4634			bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
4635			    sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
4636		} else {
4637			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
4638			    sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
4639			bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
4640			    sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
4641		}
4642
4643	} else {
4644		bce_load_rv2p_fw(sc, bce_rv2p_proc1,
4645		    sizeof(bce_rv2p_proc1), RV2P_PROC1);
4646		bce_load_rv2p_fw(sc, bce_rv2p_proc2,
4647		    sizeof(bce_rv2p_proc2), RV2P_PROC2);
4648	}
4649
4650	bce_init_rxp_cpu(sc);
4651	bce_init_txp_cpu(sc);
4652	bce_init_tpat_cpu(sc);
4653	bce_init_com_cpu(sc);
4654	bce_init_cp_cpu(sc);
4655
4656	DBEXIT(BCE_VERBOSE_RESET);
4657}
4658
4659/****************************************************************************/
4660/* Initialize context memory.                                               */
4661/*                                                                          */
4662/* Clears the memory associated with each Context ID (CID).                 */
4663/*                                                                          */
4664/* Returns:                                                                 */
4665/*   0 for success, positive value for failure.                             */
4666/****************************************************************************/
4667static int
4668bce_init_ctx(struct bce_softc *sc)
4669{
4670	u32 offset, val, vcid_addr;
4671	int i, j, rc, retry_cnt;
4672
4673	rc = 0;
4674	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4675
4676	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4677		retry_cnt = CTX_INIT_RETRY_COUNT;
4678
4679		DBPRINT(sc, BCE_INFO_CTX, "Initializing 5709 context.\n");
4680
4681		/*
4682		 * BCM5709 context memory may be cached
4683		 * in host memory so prepare the host memory
4684		 * for access.
4685		 */
4686		val = BCE_CTX_COMMAND_ENABLED |
4687		    BCE_CTX_COMMAND_MEM_INIT | (1 << 12);
4688		val |= (BCM_PAGE_BITS - 8) << 16;
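		/*
		 * The page size field is encoded as log2(page size) - 8;
		 * e.g. with 4KiB host pages (BCM_PAGE_BITS == 12) the
		 * value programmed here is 4.
		 */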
4689		REG_WR(sc, BCE_CTX_COMMAND, val);
4690
4691		/* Wait for mem init command to complete. */
4692		for (i = 0; i < retry_cnt; i++) {
4693			val = REG_RD(sc, BCE_CTX_COMMAND);
4694			if (!(val & BCE_CTX_COMMAND_MEM_INIT))
4695				break;
4696			DELAY(2);
4697		}
4698		if ((val & BCE_CTX_COMMAND_MEM_INIT) != 0) {
4699			BCE_PRINTF("%s(): Context memory initialization failed!\n",
4700			    __FUNCTION__);
4701			rc = EBUSY;
4702			goto init_ctx_fail;
4703		}
4704
4705		for (i = 0; i < sc->ctx_pages; i++) {
4706			/* Set the physical address of the context memory. */
4707			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
4708			    BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
4709			    BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
4710			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
4711			    BCE_ADDR_HI(sc->ctx_paddr[i]));
4712			REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL, i |
4713			    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4714
4715			/* Verify the context memory write was successful. */
4716			for (j = 0; j < retry_cnt; j++) {
4717				val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
4718				if ((val &
4719				    BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
4720					break;
4721				DELAY(5);
4722			}
4723			if ((val & BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) != 0) {
4724				BCE_PRINTF("%s(): Failed to initialize "
4725				    "context page %d!\n", __FUNCTION__, i);
4726				rc = EBUSY;
4727				goto init_ctx_fail;
4728			}
4729		}
4730	} else {
4731		DBPRINT(sc, BCE_INFO, "Initializing 5706/5708 context.\n");
4732
4733		/*
4734		 * For the 5706/5708, context memory is local to
4735		 * the controller, so initialize the controller
4736		 * context memory.
4737		 */
4738
4739		vcid_addr = GET_CID_ADDR(96);
4740		while (vcid_addr) {
4741			vcid_addr -= PHY_CTX_SIZE;
4742
4743			REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
4744			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4745
4746			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
4747				CTX_WR(sc, 0x00, offset, 0);
4748			}
4749
4750			REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
4751			REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
4752		}
4753	}
4754init_ctx_fail:
4755	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_CTX);
4756	return (rc);
4757}
4758
4759/****************************************************************************/
4760/* Fetch the permanent MAC address of the controller.                       */
4761/*                                                                          */
4762/* Returns:                                                                 */
4763/*   Nothing.                                                               */
4764/****************************************************************************/
4765static void
4766bce_get_mac_addr(struct bce_softc *sc)
4767{
4768	u32 mac_lo = 0, mac_hi = 0;
4769
4770	DBENTER(BCE_VERBOSE_RESET);
4771
4772	/*
4773	 * The NetXtreme II bootcode populates various NIC
4774	 * power-on and runtime configuration items in a
4775	 * shared memory area.  The factory configured MAC
4776	 * address is available from both NVRAM and the
4777	 * shared memory area so we'll read the value from
4778	 * shared memory for speed.
4779	 */
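	/*
	 * For example (illustrative values only), mac_hi = 0x0000A1B2
	 * and mac_lo = 0xC3D4E5F6 unpack below to the station address
	 * a1:b2:c3:d4:e5:f6.
	 */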
4780
4781	mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
4782	mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
4783
4784	if ((mac_lo == 0) && (mac_hi == 0)) {
4785		BCE_PRINTF("%s(%d): Invalid Ethernet address!\n",
4786		    __FILE__, __LINE__);
4787	} else {
4788		sc->eaddr[0] = (u_char)(mac_hi >> 8);
4789		sc->eaddr[1] = (u_char)(mac_hi >> 0);
4790		sc->eaddr[2] = (u_char)(mac_lo >> 24);
4791		sc->eaddr[3] = (u_char)(mac_lo >> 16);
4792		sc->eaddr[4] = (u_char)(mac_lo >> 8);
4793		sc->eaddr[5] = (u_char)(mac_lo >> 0);
4794	}
4795
4796	DBPRINT(sc, BCE_INFO_MISC, "Permanent Ethernet "
4797	    "address = %6D\n", sc->eaddr, ":");
4798	DBEXIT(BCE_VERBOSE_RESET);
4799}
4800
4801/****************************************************************************/
4802/* Program the MAC address.                                                 */
4803/*                                                                          */
4804/* Returns:                                                                 */
4805/*   Nothing.                                                               */
4806/****************************************************************************/
4807static void
4808bce_set_mac_addr(struct bce_softc *sc)
4809{
4810	u32 val;
4811	u8 *mac_addr = sc->eaddr;
4812
4813	/* ToDo: Add support for setting multiple MAC addresses. */
4814
4815	DBENTER(BCE_VERBOSE_RESET);
4816	DBPRINT(sc, BCE_INFO_MISC, "Setting Ethernet address = "
4817	    "%6D\n", sc->eaddr, ":");
4818
4819	val = (mac_addr[0] << 8) | mac_addr[1];
4820
4821	REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
4822
4823	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4824	    (mac_addr[4] << 8) | mac_addr[5];
4825
4826	REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
4827
4828	DBEXIT(BCE_VERBOSE_RESET);
4829}
4830
4831/****************************************************************************/
4832/* Stop the controller.                                                     */
4833/*                                                                          */
4834/* Returns:                                                                 */
4835/*   Nothing.                                                               */
4836/****************************************************************************/
4837static void
4838bce_stop(struct bce_softc *sc)
4839{
4840	struct ifnet *ifp;
4841
4842	DBENTER(BCE_VERBOSE_RESET);
4843
4844	BCE_LOCK_ASSERT(sc);
4845
4846	ifp = sc->bce_ifp;
4847
4848	callout_stop(&sc->bce_tick_callout);
4849
4850	/* Disable the transmit/receive blocks. */
4851	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
4852	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4853	DELAY(20);
4854
4855	bce_disable_intr(sc);
4856
4857	/* Free RX buffers. */
4858	if (bce_hdr_split == TRUE) {
4859		bce_free_pg_chain(sc);
4860	}
4861	bce_free_rx_chain(sc);
4862
4863	/* Free TX buffers. */
4864	bce_free_tx_chain(sc);
4865
4866	sc->watchdog_timer = 0;
4867
4868	sc->bce_link_up = FALSE;
4869
4870	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
4871
4872	DBEXIT(BCE_VERBOSE_RESET);
4873}
4874
4875static int
4876bce_reset(struct bce_softc *sc, u32 reset_code)
4877{
4878	u32 emac_mode_save, val;
4879	int i, rc = 0;
4880	static const u32 emac_mode_mask = BCE_EMAC_MODE_PORT |
4881	    BCE_EMAC_MODE_HALF_DUPLEX | BCE_EMAC_MODE_25G;
4882
4883	DBENTER(BCE_VERBOSE_RESET);
4884
4885	DBPRINT(sc, BCE_VERBOSE_RESET, "%s(): reset_code = 0x%08X\n",
4886	    __FUNCTION__, reset_code);
4887
4888	/*
4889	 * If ASF/IPMI is operational, then the EMAC Mode register already
4890	 * contains appropriate values for the link settings that have
4891	 * been auto-negotiated.  Resetting the chip will clobber those
4892	 * values.  Save the important bits so we can restore them after
4893	 * the reset.
4894	 */
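	/*
	 * Only the bits covered by emac_mode_mask (port, half-duplex and
	 * 25G mode) are saved here; they are written back at the
	 * bce_reset_exit label below for a BCE_DRV_MSG_CODE_RESET request.
	 */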
4895	emac_mode_save = REG_RD(sc, BCE_EMAC_MODE) & emac_mode_mask;
4896
4897	/* Wait for pending PCI transactions to complete. */
4898	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
4899	    BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4900	    BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4901	    BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4902	    BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4903	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
4904	DELAY(5);
4905
4906	/* Disable DMA */
4907	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4908		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
4909		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
4910		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
4911	}
4912
4913	/* Assume bootcode is running. */
4914	sc->bce_fw_timed_out = FALSE;
4915	sc->bce_drv_cardiac_arrest = FALSE;
4916
4917	/* Give the firmware a chance to prepare for the reset. */
4918	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
4919	if (rc)
4920		goto bce_reset_exit;
4921
4922	/* Set a firmware reminder that this is a soft reset. */
4923	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE, BCE_DRV_RESET_SIGNATURE_MAGIC);
4924
4925	/* Dummy read to force the chip to complete all current transactions. */
4926	val = REG_RD(sc, BCE_MISC_ID);
4927
4928	/* Chip reset. */
4929	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
4930		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
4931		REG_RD(sc, BCE_MISC_COMMAND);
4932		DELAY(5);
4933
4934		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4935		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4936
4937		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
4938	} else {
4939		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4940		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4941		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4942		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
4943
4944		/* Allow up to 30us for reset to complete. */
4945		for (i = 0; i < 10; i++) {
4946			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
4947			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4948			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
4949				break;
4950			}
4951			DELAY(10);
4952		}
4953
4954		/* Check that reset completed successfully. */
4955		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4956		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4957			BCE_PRINTF("%s(%d): Reset failed!\n",
4958			    __FILE__, __LINE__);
4959			rc = EBUSY;
4960			goto bce_reset_exit;
4961		}
4962	}
4963
4964	/* Make sure byte swapping is properly configured. */
4965	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
4966	if (val != 0x01020304) {
4967		BCE_PRINTF("%s(%d): Byte swap is incorrect!\n",
4968		    __FILE__, __LINE__);
4969		rc = ENODEV;
4970		goto bce_reset_exit;
4971	}
4972
4973	/* Just completed a reset, assume that firmware is running again. */
4974	sc->bce_fw_timed_out = FALSE;
4975	sc->bce_drv_cardiac_arrest = FALSE;
4976
4977	/* Wait for the firmware to finish its initialization. */
4978	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
4979	if (rc)
4980		BCE_PRINTF("%s(%d): Firmware did not complete "
4981		    "initialization!\n", __FILE__, __LINE__);
4982	/* Get firmware capabilities. */
4983	bce_fw_cap_init(sc);
4984
4985bce_reset_exit:
4986	/* Restore EMAC Mode bits needed to keep ASF/IPMI running. */
4987	if (reset_code == BCE_DRV_MSG_CODE_RESET) {
4988		val = REG_RD(sc, BCE_EMAC_MODE);
4989		val = (val & ~emac_mode_mask) | emac_mode_save;
4990		REG_WR(sc, BCE_EMAC_MODE, val);
4991	}
4992
4993	DBEXIT(BCE_VERBOSE_RESET);
4994	return (rc);
4995}
4996
4997static int
4998bce_chipinit(struct bce_softc *sc)
4999{
5000	u32 val;
5001	int rc = 0;
5002
5003	DBENTER(BCE_VERBOSE_RESET);
5004
5005	bce_disable_intr(sc);
5006
5007	/*
5008	 * Initialize DMA byte/word swapping, configure the number of DMA
5009	 * channels and PCI clock compensation delay.
5010	 */
5011	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
5012	    BCE_DMA_CONFIG_DATA_WORD_SWAP |
5013#if BYTE_ORDER == BIG_ENDIAN
5014	    BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
5015#endif
5016	    BCE_DMA_CONFIG_CNTL_WORD_SWAP |
5017	    DMA_READ_CHANS << 12 |
5018	    DMA_WRITE_CHANS << 16;
5019
5020	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
5021
5022	if ((sc->bce_flags & BCE_PCIX_FLAG) && (sc->bus_speed_mhz == 133))
5023		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
5024
5025	/*
5026	 * This setting resolves a problem observed on certain Intel PCI
5027	 * chipsets that cannot handle multiple outstanding DMA operations.
5028	 * See errata E9_5706A1_65.
5029	 */
5030	if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5031	    (BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0) &&
5032	    !(sc->bce_flags & BCE_PCIX_FLAG))
5033		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
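	/*
	 * Ping-pong mode presumably keeps the controller from issuing
	 * multiple outstanding DMA requests, which is exactly what the
	 * affected chipsets cannot handle (see the errata note above).
	 */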
5034
5035	REG_WR(sc, BCE_DMA_CONFIG, val);
5036
5037	/* Enable the RX_V2P and Context state machines before access. */
5038	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
5039	    BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
5040	    BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
5041	    BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
5042
5043	/* Initialize context mapping and zero out the quick contexts. */
5044	if ((rc = bce_init_ctx(sc)) != 0)
5045		goto bce_chipinit_exit;
5046
5047	/* Initialize the on-board CPUs. */
5048	bce_init_cpus(sc);
5049
5050	/* Enable management frames (NC-SI) to flow to the MCP. */
5051	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5052		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) | BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
5053		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
5054	}
5055
5056	/* Prepare NVRAM for access. */
5057	if ((rc = bce_init_nvram(sc)) != 0)
5058		goto bce_chipinit_exit;
5059
5060	/* Set the kernel bypass block size */
5061	val = REG_RD(sc, BCE_MQ_CONFIG);
5062	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
5063	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
5064
5065	/* Enable bins used on the 5709. */
5066	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
5067		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
5068		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
5069			val |= BCE_MQ_CONFIG_HALT_DIS;
5070	}
5071
5072	REG_WR(sc, BCE_MQ_CONFIG, val);
5073
5074	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
5075	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
5076	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
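	/*
	 * The same value is programmed as both the start and the end of
	 * the kernel bypass window, i.e. the offset just past the
	 * MAX_CID_CNT kernel mailbox contexts above the 0x10000 base.
	 */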
5077
5078	/* Set the page size and clear the RV2P processor stall bits. */
5079	val = (BCM_PAGE_BITS - 8) << 24;
5080	REG_WR(sc, BCE_RV2P_CONFIG, val);
5081
5082	/* Configure page size. */
5083	val = REG_RD(sc, BCE_TBDR_CONFIG);
5084	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
5085	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
5086	REG_WR(sc, BCE_TBDR_CONFIG, val);
5087
5088	/* Set the perfect match control register to default. */
5089	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
5090
5091bce_chipinit_exit:
5092	DBEXIT(BCE_VERBOSE_RESET);
5093
5094	return(rc);
5095}
5096
5097/****************************************************************************/
5098/* Initialize the controller in preparation to send/receive traffic.        */
5099/*                                                                          */
5100/* Returns:                                                                 */
5101/*   0 for success, positive value for failure.                             */
5102/****************************************************************************/
5103static int
5104bce_blockinit(struct bce_softc *sc)
5105{
5106	u32 reg, val;
5107	int rc = 0;
5108
5109	DBENTER(BCE_VERBOSE_RESET);
5110
5111	/* Load the hardware default MAC address. */
5112	bce_set_mac_addr(sc);
5113
5114	/* Set the Ethernet backoff seed value */
5115	val = sc->eaddr[0]         + (sc->eaddr[1] << 8) +
5116	      (sc->eaddr[2] << 16) + (sc->eaddr[3]     ) +
5117	      (sc->eaddr[4] << 8)  + (sc->eaddr[5] << 16);
5118	REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
5119
5120	sc->last_status_idx = 0;
5121	sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
5122
5123	/* Set up link change interrupt generation. */
5124	REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
5125
5126	/* Program the physical address of the status block. */
5127	REG_WR(sc, BCE_HC_STATUS_ADDR_L,
5128	    BCE_ADDR_LO(sc->status_block_paddr));
5129	REG_WR(sc, BCE_HC_STATUS_ADDR_H,
5130	    BCE_ADDR_HI(sc->status_block_paddr));
5131
5132	/* Program the physical address of the statistics block. */
5133	REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
5134	    BCE_ADDR_LO(sc->stats_block_paddr));
5135	REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
5136	    BCE_ADDR_HI(sc->stats_block_paddr));
5137
5138	/*
5139	 * Program various host coalescing parameters.
5140	 * Trip points control how many BDs should be ready before generating
5141	 * an interrupt while ticks control how long a BD can sit in the chain
5142	 * before generating an interrupt.
5143	 */
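	/*
	 * Each register below carries the "during interrupt" value in its
	 * upper 16 bits and the normal value in its lower 16 bits.  As an
	 * illustration (made-up numbers, not the driver defaults), a TX
	 * trip point of 20 with a TX tick value of 80 would interrupt once
	 * 20 tx_bd's have completed, or once a completed tx_bd has been
	 * waiting for 80 tick intervals, whichever happens first.
	 */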
5144	REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5145	    (sc->bce_tx_quick_cons_trip_int << 16) |
5146	    sc->bce_tx_quick_cons_trip);
5147	REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5148	    (sc->bce_rx_quick_cons_trip_int << 16) |
5149	    sc->bce_rx_quick_cons_trip);
5150	REG_WR(sc, BCE_HC_TX_TICKS,
5151	    (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
5152	REG_WR(sc, BCE_HC_RX_TICKS,
5153	    (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
5154	REG_WR(sc, BCE_HC_STATS_TICKS, sc->bce_stats_ticks & 0xffff00);
5155	REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5156	/* Not used for L2. */
5157	REG_WR(sc, BCE_HC_COMP_PROD_TRIP, 0);
5158	REG_WR(sc, BCE_HC_COM_TICKS, 0);
5159	REG_WR(sc, BCE_HC_CMD_TICKS, 0);
5160
5161	/* Configure the Host Coalescing block. */
5162	val = BCE_HC_CONFIG_RX_TMR_MODE | BCE_HC_CONFIG_TX_TMR_MODE |
5163	    BCE_HC_CONFIG_COLLECT_STATS;
5164
5165#if 0
5166	/* ToDo: Add MSI-X support. */
5167	if (sc->bce_flags & BCE_USING_MSIX_FLAG) {
5168		u32 base = ((BCE_TX_VEC - 1) * BCE_HC_SB_CONFIG_SIZE) +
5169		    BCE_HC_SB_CONFIG_1;
5170
5171		REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
5172
5173		REG_WR(sc, base, BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
5174		    BCE_HC_SB_CONFIG_1_ONE_SHOT);
5175
5176		REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
5177		    (sc->tx_quick_cons_trip_int << 16) |
5178		     sc->tx_quick_cons_trip);
5179
5180		REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
5181		    (sc->tx_ticks_int << 16) | sc->tx_ticks);
5182
5183		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
5184	}
5185
5186	/*
5187	 * Tell the HC block to automatically set the
5188	 * INT_MASK bit after an MSI/MSI-X interrupt
5189	 * is generated so the driver doesn't have to.
5190	 */
5191	if (sc->bce_flags & BCE_ONE_SHOT_MSI_FLAG)
5192		val |= BCE_HC_CONFIG_ONE_SHOT;
5193
5194	/* Set the MSI-X status blocks to 128 byte boundaries. */
5195	if (sc->bce_flags & BCE_USING_MSIX_FLAG)
5196		val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
5197#endif
5198
5199	REG_WR(sc, BCE_HC_CONFIG, val);
5200
5201	/* Clear the internal statistics counters. */
5202	REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
5203
5204	/* Verify that bootcode is running. */
5205	reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
5206
5207	DBRUNIF(DB_RANDOMTRUE(bootcode_running_failure_sim_control),
5208	    BCE_PRINTF("%s(%d): Simulating bootcode failure.\n",
5209	    __FILE__, __LINE__);
5210	    reg = 0);
5211
5212	if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5213	    BCE_DEV_INFO_SIGNATURE_MAGIC) {
5214		BCE_PRINTF("%s(%d): Bootcode not running! Found: 0x%08X, "
5215		    "Expected: 0x%08X\n", __FILE__, __LINE__,
5216		    (reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK),
5217		    BCE_DEV_INFO_SIGNATURE_MAGIC);
5218		rc = ENODEV;
5219		goto bce_blockinit_exit;
5220	}
5221
5222	/* Enable DMA */
5223	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
5224		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
5225		val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
5226		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
5227	}
5228
5229	/* Allow bootcode to apply additional fixes before enabling MAC. */
5230	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 |
5231	    BCE_DRV_MSG_CODE_RESET);
5232
5233	/* Enable link state change interrupt generation. */
5234	REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
5235
5236	/* Enable the RXP. */
5237	bce_start_rxp_cpu(sc);
5238
5239	/* Disable management frames (NC-SI) from flowing to the MCP. */
5240	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5241		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
5242		    ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
5243		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
5244	}
5245
5246	/* Enable all remaining blocks in the MAC. */
5247	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
5248		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
5249		    BCE_MISC_ENABLE_DEFAULT_XI);
5250	else
5251		REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
5252		    BCE_MISC_ENABLE_DEFAULT);
5253
5254	REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
5255	DELAY(20);
5256
5257	/* Save the current host coalescing block settings. */
5258	sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
5259
5260bce_blockinit_exit:
5261	DBEXIT(BCE_VERBOSE_RESET);
5262
5263	return (rc);
5264}
5265
5266/****************************************************************************/
5267/* Encapsulate an mbuf into the rx_bd chain.                                */
5268/*                                                                          */
5269/* Returns:                                                                 */
5270/*   0 for success, positive value for failure.                             */
5271/****************************************************************************/
5272static int
5273bce_get_rx_buf(struct bce_softc *sc, u16 prod, u16 chain_prod, u32 *prod_bseq)
5274{
5275	bus_dma_segment_t segs[1];
5276	struct mbuf *m_new = NULL;
5277	struct rx_bd *rxbd;
5278	int nsegs, error, rc = 0;
5279#ifdef BCE_DEBUG
5280	u16 debug_chain_prod = chain_prod;
5281#endif
5282
5283	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5284
5285	/* Make sure the inputs are valid. */
5286	DBRUNIF((chain_prod > MAX_RX_BD_ALLOC),
5287	    BCE_PRINTF("%s(%d): RX producer out of range: "
5288	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5289	    chain_prod, (u16)MAX_RX_BD_ALLOC));
5290
5291	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5292	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__,
5293	    prod, chain_prod, *prod_bseq);
5294
5295	/* Update some debug statistic counters */
5296	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
5297	    sc->rx_low_watermark = sc->free_rx_bd);
5298	DBRUNIF((sc->free_rx_bd == sc->max_rx_bd),
5299	    sc->rx_empty_count++);
5300
5301	/* Simulate an mbuf allocation failure. */
5302	DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5303	    sc->mbuf_alloc_failed_count++;
5304	    sc->mbuf_alloc_failed_sim_count++;
5305	    rc = ENOBUFS;
5306	    goto bce_get_rx_buf_exit);
5307
5308	/* This is a new mbuf allocation. */
5309	if (bce_hdr_split == TRUE)
5310		MGETHDR(m_new, M_NOWAIT, MT_DATA);
5311	else
5312		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
5313		    sc->rx_bd_mbuf_alloc_size);
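	/*
	 * With header splitting enabled only a plain header mbuf is
	 * needed here, since the packet payload is carried by the page
	 * chain buffers; otherwise a cluster of rx_bd_mbuf_alloc_size
	 * bytes is allocated to hold the entire frame.
	 */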
5314
5315	if (m_new == NULL) {
5316		sc->mbuf_alloc_failed_count++;
5317		rc = ENOBUFS;
5318		goto bce_get_rx_buf_exit;
5319	}
5320
5321	DBRUN(sc->debug_rx_mbuf_alloc++);
5322
5323	/* Make sure we have a valid packet header. */
5324	M_ASSERTPKTHDR(m_new);
5325
5326	/* Initialize the mbuf size and pad if necessary for alignment. */
5327	m_new->m_pkthdr.len = m_new->m_len = sc->rx_bd_mbuf_alloc_size;
5328	m_adj(m_new, sc->rx_bd_mbuf_align_pad);
5329
5330	/* ToDo: Consider calling m_fragment() to test error handling. */
5331
5332	/* Map the mbuf cluster into device memory. */
5333	error = bus_dmamap_load_mbuf_sg(sc->rx_mbuf_tag,
5334	    sc->rx_mbuf_map[chain_prod], m_new, segs, &nsegs, BUS_DMA_NOWAIT);
5335
5336	/* Handle any mapping errors. */
5337	if (error) {
5338		BCE_PRINTF("%s(%d): Error mapping mbuf into RX "
5339		    "chain (%d)!\n", __FILE__, __LINE__, error);
5340
5341		sc->dma_map_addr_rx_failed_count++;
5342		m_freem(m_new);
5343
5344		DBRUN(sc->debug_rx_mbuf_alloc--);
5345
5346		rc = ENOBUFS;
5347		goto bce_get_rx_buf_exit;
5348	}
5349
5350	/* All mbufs must map to a single segment. */
5351	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
5352	    __FUNCTION__, nsegs));
5353
5354	/* Setup the rx_bd for the segment. */
5355	rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
5356
5357	rxbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
5358	rxbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
5359	rxbd->rx_bd_len       = htole32(segs[0].ds_len);
5360	rxbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5361	*prod_bseq += segs[0].ds_len;
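	/*
	 * prod_bseq is a running count of buffer bytes made available to
	 * the controller; the caller writes it to the host byte sequence
	 * mailbox together with the producer index.
	 */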
5362
5363	/* Save the mbuf and update our counter. */
5364	sc->rx_mbuf_ptr[chain_prod] = m_new;
5365	sc->free_rx_bd -= nsegs;
5366
5367	DBRUNMSG(BCE_INSANE_RECV,
5368	    bce_dump_rx_mbuf_chain(sc, debug_chain_prod, nsegs));
5369
5370	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5371	    "chain_prod = 0x%04X, prod_bseq = 0x%08X\n", __FUNCTION__, prod,
5372	    chain_prod, *prod_bseq);
5373
5374bce_get_rx_buf_exit:
5375	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5376
5377	return(rc);
5378}
5379
5380/****************************************************************************/
5381/* Encapsulate an mbuf cluster into the page chain.                         */
5382/*                                                                          */
5383/* Returns:                                                                 */
5384/*   0 for success, positive value for failure.                             */
5385/****************************************************************************/
5386static int
5387bce_get_pg_buf(struct bce_softc *sc, u16 prod, u16 prod_idx)
5388{
5389	bus_dma_segment_t segs[1];
5390	struct mbuf *m_new = NULL;
5391	struct rx_bd *pgbd;
5392	int error, nsegs, rc = 0;
5393#ifdef BCE_DEBUG
5394	u16 debug_prod_idx = prod_idx;
5395#endif
5396
5397	DBENTER(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5398
5399	/* Make sure the inputs are valid. */
5400	DBRUNIF((prod_idx > MAX_PG_BD_ALLOC),
5401	    BCE_PRINTF("%s(%d): page producer out of range: "
5402	    "0x%04X > 0x%04X\n", __FILE__, __LINE__,
5403	    prod_idx, (u16)MAX_PG_BD_ALLOC));
5404
5405	DBPRINT(sc, BCE_EXTREME_RECV, "%s(enter): prod = 0x%04X, "
5406	    "chain_prod = 0x%04X\n", __FUNCTION__, prod, prod_idx);
5407
5408	/* Update counters if we've hit a new low or run out of pages. */
5409	DBRUNIF((sc->free_pg_bd < sc->pg_low_watermark),
5410	    sc->pg_low_watermark = sc->free_pg_bd);
5411	DBRUNIF((sc->free_pg_bd == sc->max_pg_bd), sc->pg_empty_count++);
5412
5413	/* Simulate an mbuf allocation failure. */
5414	DBRUNIF(DB_RANDOMTRUE(mbuf_alloc_failed_sim_control),
5415	    sc->mbuf_alloc_failed_count++;
5416	    sc->mbuf_alloc_failed_sim_count++;
5417	    rc = ENOBUFS;
5418	    goto bce_get_pg_buf_exit);
5419
5420	/* This is a new mbuf allocation. */
5421	m_new = m_getcl(M_NOWAIT, MT_DATA, 0);
5422	if (m_new == NULL) {
5423		sc->mbuf_alloc_failed_count++;
5424		rc = ENOBUFS;
5425		goto bce_get_pg_buf_exit;
5426	}
5427
5428	DBRUN(sc->debug_pg_mbuf_alloc++);
5429
5430	m_new->m_len = MCLBYTES;
5431
5432	/* ToDo: Consider calling m_fragment() to test error handling. */
5433
5434	/* Map the mbuf cluster into device memory. */
5435	error = bus_dmamap_load_mbuf_sg(sc->pg_mbuf_tag,
5436	    sc->pg_mbuf_map[prod_idx], m_new, segs, &nsegs, BUS_DMA_NOWAIT);
5437
5438	/* Handle any mapping errors. */
5439	if (error) {
5440		BCE_PRINTF("%s(%d): Error mapping mbuf into page chain!\n",
5441		    __FILE__, __LINE__);
5442
5443		m_freem(m_new);
5444		DBRUN(sc->debug_pg_mbuf_alloc--);
5445
5446		rc = ENOBUFS;
5447		goto bce_get_pg_buf_exit;
5448	}
5449
5450	/* All mbufs must map to a single segment. */
5451	KASSERT(nsegs == 1, ("%s(): Too many segments returned (%d)!",
5452	    __FUNCTION__, nsegs));
5453
5454	/* ToDo: Do we need bus_dmamap_sync(,,BUS_DMASYNC_PREREAD) here? */
5455
5456	/*
5457	 * The page chain uses the same rx_bd data structure
5458	 * as the receive chain but doesn't require a byte sequence (bseq).
5459	 */
5460	pgbd = &sc->pg_bd_chain[PG_PAGE(prod_idx)][PG_IDX(prod_idx)];
5461
5462	pgbd->rx_bd_haddr_lo  = htole32(BCE_ADDR_LO(segs[0].ds_addr));
5463	pgbd->rx_bd_haddr_hi  = htole32(BCE_ADDR_HI(segs[0].ds_addr));
5464	pgbd->rx_bd_len       = htole32(MCLBYTES);
5465	pgbd->rx_bd_flags     = htole32(RX_BD_FLAGS_START | RX_BD_FLAGS_END);
5466
5467	/* Save the mbuf and update our counter. */
5468	sc->pg_mbuf_ptr[prod_idx] = m_new;
5469	sc->free_pg_bd--;
5470
5471	DBRUNMSG(BCE_INSANE_RECV,
5472	    bce_dump_pg_mbuf_chain(sc, debug_prod_idx, 1));
5473
5474	DBPRINT(sc, BCE_EXTREME_RECV, "%s(exit): prod = 0x%04X, "
5475	    "prod_idx = 0x%04X\n", __FUNCTION__, prod, prod_idx);
5476
5477bce_get_pg_buf_exit:
5478	DBEXIT(BCE_EXTREME_RESET | BCE_EXTREME_RECV | BCE_EXTREME_LOAD);
5479
5480	return(rc);
5481}
5482
5483/****************************************************************************/
5484/* Initialize the TX context memory.                                        */
5485/*                                                                          */
5486/* Returns:                                                                 */
5487/*   Nothing                                                                */
5488/****************************************************************************/
5489static void
5490bce_init_tx_context(struct bce_softc *sc)
5491{
5492	u32 val;
5493
5494	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5495
5496	/* Initialize the context ID for an L2 TX chain. */
5497	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
5498		/* Set the CID type to support an L2 connection. */
5499		val = BCE_L2CTX_TX_TYPE_TYPE_L2_XI |
5500		    BCE_L2CTX_TX_TYPE_SIZE_L2_XI;
5501		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE_XI, val);
5502		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2_XI | (8 << 16);
5503		CTX_WR(sc, GET_CID_ADDR(TX_CID),
5504		    BCE_L2CTX_TX_CMD_TYPE_XI, val);
5505
5506		/* Point the hardware to the first page in the chain. */
5507		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5508		CTX_WR(sc, GET_CID_ADDR(TX_CID),
5509		    BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
5510		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5511		CTX_WR(sc, GET_CID_ADDR(TX_CID),
5512		    BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
5513	} else {
5514		/* Set the CID type to support an L2 connection. */
5515		val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
5516		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
5517		val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
5518		CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_CMD_TYPE, val);
5519
5520		/* Point the hardware to the first page in the chain. */
5521		val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
5522		CTX_WR(sc, GET_CID_ADDR(TX_CID),
5523		    BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
5524		val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
5525		CTX_WR(sc, GET_CID_ADDR(TX_CID),
5526		    BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
5527	}
5528
5529	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_CTX);
5530}
5531
5532/****************************************************************************/
5533/* Allocate memory and initialize the TX data structures.                   */
5534/*                                                                          */
5535/* Returns:                                                                 */
5536/*   0 for success, positive value for failure.                             */
5537/****************************************************************************/
5538static int
5539bce_init_tx_chain(struct bce_softc *sc)
5540{
5541	struct tx_bd *txbd;
5542	int i, rc = 0;
5543
5544	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5545
5546	/* Set the initial TX producer/consumer indices. */
5547	sc->tx_prod        = 0;
5548	sc->tx_cons        = 0;
5549	sc->tx_prod_bseq   = 0;
5550	sc->used_tx_bd     = 0;
5551	sc->max_tx_bd      = USABLE_TX_BD_ALLOC;
5552	DBRUN(sc->tx_hi_watermark = 0);
5553	DBRUN(sc->tx_full_count = 0);
5554
5555	/*
5556	 * The NetXtreme II supports a linked-list structure called
5557	 * a Buffer Descriptor Chain (or BD chain).  A BD chain
5558	 * consists of a series of 1 or more chain pages, each of which
5559	 * consists of a fixed number of BD entries.
5560	 * The last BD entry on each page is a pointer to the next page
5561	 * in the chain, and the last pointer in the BD chain
5562	 * points back to the beginning of the chain.
5563	 */
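	/*
	 * As an illustration, with 256 tx_bd's per page (the actual count
	 * is USABLE_TX_BD_PER_PAGE + 1), slots 0 through 254 describe
	 * packets while slot 255 holds the host address of the following
	 * page; the loop below programs those next-page pointers, wrapping
	 * the last one back to page 0.
	 */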
5564
5565	/* Set the TX next pointer chain entries. */
5566	for (i = 0; i < sc->tx_pages; i++) {
5567		int j;
5568
5569		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
5570
5571		/* Check if we've reached the last page. */
5572		if (i == (sc->tx_pages - 1))
5573			j = 0;
5574		else
5575			j = i + 1;
5576
5577		txbd->tx_bd_haddr_hi =
5578		    htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
5579		txbd->tx_bd_haddr_lo =
5580		    htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
5581	}
5582
5583	bce_init_tx_context(sc);
5584
5585	DBRUNMSG(BCE_INSANE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD_ALLOC));
5586	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_LOAD);
5587
5588	return(rc);
5589}
5590
5591/****************************************************************************/
5592/* Free memory and clear the TX data structures.                            */
5593/*                                                                          */
5594/* Returns:                                                                 */
5595/*   Nothing.                                                               */
5596/****************************************************************************/
5597static void
5598bce_free_tx_chain(struct bce_softc *sc)
5599{
5600	int i;
5601
5602	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5603
5604	/* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
5605	for (i = 0; i < MAX_TX_BD_AVAIL; i++) {
5606		if (sc->tx_mbuf_ptr[i] != NULL) {
5607			if (sc->tx_mbuf_map[i] != NULL)
5608				bus_dmamap_sync(sc->tx_mbuf_tag,
5609				    sc->tx_mbuf_map[i],
5610				    BUS_DMASYNC_POSTWRITE);
5611			m_freem(sc->tx_mbuf_ptr[i]);
5612			sc->tx_mbuf_ptr[i] = NULL;
5613			DBRUN(sc->debug_tx_mbuf_alloc--);
5614		}
5615	}
5616
5617	/* Clear each TX chain page. */
5618	for (i = 0; i < sc->tx_pages; i++)
5619		bzero((char *)sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
5620
5621	sc->used_tx_bd = 0;
5622
5623	/* Check if we lost any mbufs in the process. */
5624	DBRUNIF((sc->debug_tx_mbuf_alloc),
5625	    BCE_PRINTF("%s(%d): Memory leak! Lost %d mbufs "
5626	    "from tx chain!\n", __FILE__, __LINE__,
5627	    sc->debug_tx_mbuf_alloc));
5628
5629	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_SEND | BCE_VERBOSE_UNLOAD);
5630}
5631
5632/****************************************************************************/
5633/* Initialize the RX context memory.                                        */
5634/*                                                                          */
5635/* Returns:                                                                 */
5636/*   Nothing                                                                */
5637/****************************************************************************/
5638static void
5639bce_init_rx_context(struct bce_softc *sc)
5640{
5641	u32 val;
5642
5643	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5644
5645	/* Init the type, size, and BD cache levels for the RX context. */
5646	val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
5647	    BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 |
5648	    (0x02 << BCE_L2CTX_RX_BD_PRE_READ_SHIFT);
5649
5650	/*
5651	 * Set the level for generating pause frames
5652	 * when the number of available rx_bd's gets
5653	 * too low (the low watermark) and the level
5654	 * when pause frames can be stopped (the high
5655	 * watermark).
5656	 */
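	/*
	 * Sketch of the calculation below with made-up numbers: given 512
	 * usable rx_bd's, hi_water starts out as 512 / 4 = 128; both
	 * watermarks are then divided by their *_SCALE factors and
	 * hi_water is clamped to at most 0xf before being merged into the
	 * context type value written below.
	 */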
5657	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
5658		u32 lo_water, hi_water;
5659
5660		if (sc->bce_flags & BCE_USING_TX_FLOW_CONTROL) {
5661			lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
5662		} else {
5663			lo_water = 0;
5664		}
5665
5666		if (lo_water >= USABLE_RX_BD_ALLOC) {
5667			lo_water = 0;
5668		}
5669
5670		hi_water = USABLE_RX_BD_ALLOC / 4;
5671
5672		if (hi_water <= lo_water) {
5673			lo_water = 0;
5674		}
5675
5676		lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
5677		hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
5678
5679		if (hi_water > 0xf)
5680			hi_water = 0xf;
5681		else if (hi_water == 0)
5682			lo_water = 0;
5683
5684		val |= (lo_water << BCE_L2CTX_RX_LO_WATER_MARK_SHIFT) |
5685		    (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
5686	}
5687
5688	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
5689
5690	/* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
5691	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
5692		val = REG_RD(sc, BCE_MQ_MAP_L2_5);
5693		REG_WR(sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
5694	}
5695
5696	/* Point the hardware to the first page in the chain. */
5697	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
5698	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
5699	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
5700	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
5701
5702	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_CTX);
5703}
5704
5705/****************************************************************************/
5706/* Allocate memory and initialize the RX data structures.                   */
5707/*                                                                          */
5708/* Returns:                                                                 */
5709/*   0 for success, positive value for failure.                             */
5710/****************************************************************************/
5711static int
5712bce_init_rx_chain(struct bce_softc *sc)
5713{
5714	struct rx_bd *rxbd;
5715	int i, rc = 0;
5716
5717	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5718	    BCE_VERBOSE_CTX);
5719
5720	/* Initialize the RX producer and consumer indices. */
5721	sc->rx_prod        = 0;
5722	sc->rx_cons        = 0;
5723	sc->rx_prod_bseq   = 0;
5724	sc->free_rx_bd     = USABLE_RX_BD_ALLOC;
5725	sc->max_rx_bd      = USABLE_RX_BD_ALLOC;
5726
5727	/* Initialize the RX next pointer chain entries. */
5728	for (i = 0; i < sc->rx_pages; i++) {
5729		int j;
5730
5731		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
5732
5733		/* Check if we've reached the last page. */
5734		if (i == (sc->rx_pages - 1))
5735			j = 0;
5736		else
5737			j = i + 1;
5738
5739		/* Setup the chain page pointers. */
5740		rxbd->rx_bd_haddr_hi =
5741		    htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
5742		rxbd->rx_bd_haddr_lo =
5743		    htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
5744	}
5745
5746	/* Fill up the RX chain. */
5747	bce_fill_rx_chain(sc);
5748
5749	DBRUN(sc->rx_low_watermark = USABLE_RX_BD_ALLOC);
5750	DBRUN(sc->rx_empty_count = 0);
5751	for (i = 0; i < sc->rx_pages; i++) {
5752		bus_dmamap_sync(sc->rx_bd_chain_tag, sc->rx_bd_chain_map[i],
5753		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5754	}
5755
5756	bce_init_rx_context(sc);
5757
5758	DBRUNMSG(BCE_EXTREME_RECV,
5759	    bce_dump_rx_bd_chain(sc, 0, TOTAL_RX_BD_ALLOC));
5760	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5761	    BCE_VERBOSE_CTX);
5762
5763	/* ToDo: Are there possible failure modes here? */
5764
5765	return(rc);
5766}
5767
5768/****************************************************************************/
5769/* Add mbufs to the RX chain until it's full or an mbuf allocation error    */
5770/* occurs.                                                                  */
5771/*                                                                          */
5772/* Returns:                                                                 */
5773/*   Nothing                                                                */
5774/****************************************************************************/
5775static void
5776bce_fill_rx_chain(struct bce_softc *sc)
5777{
5778	u16 prod, prod_idx;
5779	u32 prod_bseq;
5780
5781	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5782	    BCE_VERBOSE_CTX);
5783
5784	/* Get the RX chain producer indices. */
5785	prod      = sc->rx_prod;
5786	prod_bseq = sc->rx_prod_bseq;
5787
5788	/* Keep filling the RX chain until it's full. */
5789	while (sc->free_rx_bd > 0) {
5790		prod_idx = RX_CHAIN_IDX(prod);
5791		if (bce_get_rx_buf(sc, prod, prod_idx, &prod_bseq)) {
5792			/* Bail out if we can't add an mbuf to the chain. */
5793			break;
5794		}
5795		prod = NEXT_RX_BD(prod);
5796	}
5797
5798	/* Save the RX chain producer indices. */
5799	sc->rx_prod      = prod;
5800	sc->rx_prod_bseq = prod_bseq;
5801
5802	/* We should never end up pointing to a next page pointer. */
5803	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5804	    BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
5805	    __FUNCTION__, prod));
5806
5807	/* Write the mailbox and tell the chip about the waiting rx_bd's. */
5808	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod);
5809	REG_WR(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ, prod_bseq);
5810
5811	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5812	    BCE_VERBOSE_CTX);
5813}
5814
5815/****************************************************************************/
5816/* Free memory and clear the RX data structures.                            */
5817/*                                                                          */
5818/* Returns:                                                                 */
5819/*   Nothing.                                                               */
5820/****************************************************************************/
5821static void
5822bce_free_rx_chain(struct bce_softc *sc)
5823{
5824	int i;
5825
5826	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5827
5828	/* Free any mbufs still in the RX mbuf chain. */
5829	for (i = 0; i < MAX_RX_BD_AVAIL; i++) {
5830		if (sc->rx_mbuf_ptr[i] != NULL) {
5831			if (sc->rx_mbuf_map[i] != NULL)
5832				bus_dmamap_sync(sc->rx_mbuf_tag,
5833				    sc->rx_mbuf_map[i],
5834				    BUS_DMASYNC_POSTREAD);
5835			m_freem(sc->rx_mbuf_ptr[i]);
5836			sc->rx_mbuf_ptr[i] = NULL;
5837			DBRUN(sc->debug_rx_mbuf_alloc--);
5838		}
5839	}
5840
5841	/* Clear each RX chain page. */
5842	for (i = 0; i < sc->rx_pages; i++)
5843		if (sc->rx_bd_chain[i] != NULL)
5844			bzero((char *)sc->rx_bd_chain[i],
5845			    BCE_RX_CHAIN_PAGE_SZ);
5846
5847	sc->free_rx_bd = sc->max_rx_bd;
5848
5849	/* Check if we lost any mbufs in the process. */
5850	DBRUNIF((sc->debug_rx_mbuf_alloc),
5851	    BCE_PRINTF("%s(): Memory leak! Lost %d mbufs from rx chain!\n",
5852	    __FUNCTION__, sc->debug_rx_mbuf_alloc));
5853
5854	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5855}
5856
5857/****************************************************************************/
5858/* Allocate memory and initialize the page data structures.                 */
5859/* Assumes that bce_init_rx_chain() has not already been called.            */
5860/*                                                                          */
5861/* Returns:                                                                 */
5862/*   0 for success, positive value for failure.                             */
5863/****************************************************************************/
5864static int
5865bce_init_pg_chain(struct bce_softc *sc)
5866{
5867	struct rx_bd *pgbd;
5868	int i, rc = 0;
5869	u32 val;
5870
5871	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5872		BCE_VERBOSE_CTX);
5873
5874	/* Initialize the page producer and consumer indices. */
5875	sc->pg_prod        = 0;
5876	sc->pg_cons        = 0;
5877	sc->free_pg_bd     = USABLE_PG_BD_ALLOC;
5878	sc->max_pg_bd      = USABLE_PG_BD_ALLOC;
5879	DBRUN(sc->pg_low_watermark = sc->max_pg_bd);
5880	DBRUN(sc->pg_empty_count = 0);
5881
5882	/* Initialize the page next pointer chain entries. */
5883	for (i = 0; i < sc->pg_pages; i++) {
5884		int j;
5885
5886		pgbd = &sc->pg_bd_chain[i][USABLE_PG_BD_PER_PAGE];
5887
5888		/* Check if we've reached the last page. */
5889		if (i == (sc->pg_pages - 1))
5890			j = 0;
5891		else
5892			j = i + 1;
5893
5894		/* Setup the chain page pointers. */
5895		pgbd->rx_bd_haddr_hi =
5896		    htole32(BCE_ADDR_HI(sc->pg_bd_chain_paddr[j]));
5897		pgbd->rx_bd_haddr_lo =
5898		    htole32(BCE_ADDR_LO(sc->pg_bd_chain_paddr[j]));
5899	}
5900
5901	/* Setup the MQ BIN mapping for host_pg_bidx. */
5902	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709)
5903		REG_WR(sc, BCE_MQ_MAP_L2_3, BCE_MQ_MAP_L2_3_DEFAULT);
5904
5905	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, 0);
5906
5907	/* Configure the rx_bd and page chain mbuf cluster size. */
5908	val = (sc->rx_bd_mbuf_data_len << 16) | MCLBYTES;
5909	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_PG_BUF_SIZE, val);
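	/*
	 * The upper 16 bits of this context word hold the rx_bd data
	 * buffer size while the lower 16 bits hold the page chain buffer
	 * size (one mbuf cluster).
	 */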
5910
5911	/* Configure the context reserved for jumbo support. */
5912	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_RBDC_KEY,
5913		BCE_L2CTX_RX_RBDC_JUMBO_KEY);
5914
5915	/* Point the hardware to the first page in the page chain. */
5916	val = BCE_ADDR_HI(sc->pg_bd_chain_paddr[0]);
5917	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_HI, val);
5918	val = BCE_ADDR_LO(sc->pg_bd_chain_paddr[0]);
5919	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_PG_BDHADDR_LO, val);
5920
5921	/* Fill up the page chain. */
5922	bce_fill_pg_chain(sc);
5923
5924	for (i = 0; i < sc->pg_pages; i++) {
5925		bus_dmamap_sync(sc->pg_bd_chain_tag, sc->pg_bd_chain_map[i],
5926		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5927	}
5928
5929	DBRUNMSG(BCE_EXTREME_RECV,
5930	    bce_dump_pg_chain(sc, 0, TOTAL_PG_BD_ALLOC));
5931	DBEXIT(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_LOAD |
5932		BCE_VERBOSE_CTX);
5933	return(rc);
5934}
5935
5936/****************************************************************************/
5937/* Add mbufs to the page chain until it's full or an mbuf allocation error  */
5938/* occurs.                                                                  */
5939/*                                                                          */
5940/* Returns:                                                                 */
5941/*   Nothing                                                                */
5942/****************************************************************************/
5943static void
5944bce_fill_pg_chain(struct bce_softc *sc)
5945{
5946	u16 prod, prod_idx;
5947
5948	DBENTER(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5949	    BCE_VERBOSE_CTX);
5950
5951	/* Get the page chain producer index. */
5952	prod = sc->pg_prod;
5953
5954	/* Keep filling the page chain until it's full. */
5955	while (sc->free_pg_bd > 0) {
5956		prod_idx = PG_CHAIN_IDX(prod);
5957		if (bce_get_pg_buf(sc, prod, prod_idx)) {
5958			/* Bail out if we can't add an mbuf to the chain. */
5959			break;
5960		}
5961		prod = NEXT_PG_BD(prod);
5962	}
5963
5964	/* Save the page chain producer index. */
5965	sc->pg_prod = prod;
5966
5967	DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
5968	    BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
5969	    __FUNCTION__, prod));
5970
5971	/*
5972	 * Write the mailbox and tell the chip about
5973	 * the new rx_bd's in the page chain.
5974	 */
5975	REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_PG_BDIDX,
5976	    prod);
5977
5978	DBEXIT(BCE_VERBOSE_RESET | BCE_EXTREME_RECV | BCE_VERBOSE_LOAD |
5979	    BCE_VERBOSE_CTX);
5980}
5981
5982/****************************************************************************/
5983/* Free memory and clear the page data structures.                          */
5984/*                                                                          */
5985/* Returns:                                                                 */
5986/*   Nothing.                                                               */
5987/****************************************************************************/
5988static void
5989bce_free_pg_chain(struct bce_softc *sc)
5990{
5991	int i;
5992
5993	DBENTER(BCE_VERBOSE_RESET | BCE_VERBOSE_RECV | BCE_VERBOSE_UNLOAD);
5994
5995	/* Free any mbufs still in the mbuf page chain. */
5996	for (i = 0; i < MAX_PG_BD_AVAIL; i++) {
5997		if (sc->pg_mbuf_ptr[i] != NULL) {
5998			if (sc->pg_mbuf_map[i] != NULL)
5999				bus_dmamap_sync(sc->pg_mbuf_tag,
6000				    sc->pg_mbuf_map[i],
6001				    BUS_DMASYNC_POSTREAD);
6002			m_freem(sc->