1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright (c) 2002-2006 Neterion, Inc.
22  */
23 
24 #include "xgehal-device.h"
25 #include "xgehal-channel.h"
26 #include "xgehal-fifo.h"
27 #include "xgehal-ring.h"
28 #include "xgehal-driver.h"
29 #include "xgehal-mgmt.h"
30 
31 #define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
32 #define	END_SIGN	0x0
33 
34 #ifdef XGE_HAL_HERC_EMULATION
35 #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
36 #endif
37 
38 /*
 * Jenkins hash key length (in bytes)
40  */
41 #define XGE_HAL_JHASH_MSG_LEN 50
42 
43 /*
44  * mix(a,b,c) used in Jenkins hash algorithm
45  */
46 #define mix(a,b,c) { \
47 	a -= b; a -= c; a ^= (c>>13); \
48 	b -= c; b -= a; b ^= (a<<8);  \
49 	c -= a; c -= b; c ^= (b>>13); \
50 	a -= b; a -= c; a ^= (c>>12); \
51 	b -= c; b -= a; b ^= (a<<16); \
52 	c -= a; c -= b; c ^= (b>>5);  \
53 	a -= b; a -= c; a ^= (c>>3);  \
54 	b -= c; b -= a; b ^= (a<<10); \
55 	c -= a; c -= b; c ^= (b>>15); \
56 }
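
/*
 * Illustrative sketch only (not part of the HAL API): the mix() macro
 * above is typically driven by a Bob Jenkins lookup-style hash that
 * folds the key 12 bytes at a time into three accumulators. The helper
 * name and body below are hypothetical; the hash actually used for the
 * SPDM entries lives elsewhere in the HAL.
 *
 *	static u32 __jhash_sketch(const u8 *key, u32 len, u32 initval)
 *	{
 *		u32 a = 0x9e3779b9, b = 0x9e3779b9, c = initval;
 *
 *		while (len >= 12) {
 *			a += key[0] | ((u32)key[1] << 8) |
 *			     ((u32)key[2] << 16) | ((u32)key[3] << 24);
 *			b += key[4] | ((u32)key[5] << 8) |
 *			     ((u32)key[6] << 16) | ((u32)key[7] << 24);
 *			c += key[8] | ((u32)key[9] << 8) |
 *			     ((u32)key[10] << 16) | ((u32)key[11] << 24);
 *			mix(a, b, c);
 *			key += 12;
 *			len -= 12;
 *		}
 *		(handling of the trailing len % 12 bytes and the final
 *		 mix is omitted for brevity)
 *		return c;
 *	}
 */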
57 
58 extern xge_hal_driver_t *g_xge_hal_driver;
59 
/*
 * __hal_device_event_queued
 * @data: pointer to xge_hal_device_t structure
 * @event_type: type of the queued event
 *
 * Called when a new event has been successfully queued.
 */
66 void
67 __hal_device_event_queued(void *data, int event_type)
68 {
69 	xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
70 	if (g_xge_hal_driver->uld_callbacks.event_queued) {
71 		g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
72 	}
73 }
74 
/*
 * __hal_pio_mem_write32_upper
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since the Xframe has 64-bit registers, the upper and lower
 * 32-bit halves are written separately.
 */
82 void
83 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
84 {
85 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
86 	xge_os_pio_mem_write32(pdev, regh, val, addr);
87 #else
88 	xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
89 #endif
90 }
91 
/*
 * __hal_pio_mem_write32_lower
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since the Xframe has 64-bit registers, the upper and lower
 * 32-bit halves are written separately.
 */
99 void
100 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
101                             void *addr)
102 {
103 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
104 	xge_os_pio_mem_write32(pdev, regh, val,
105                                (void *) ((char *)addr +	4));
106 #else
107 	xge_os_pio_mem_write32(pdev, regh, val, addr);
108 #endif
109 }
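
/*
 * Illustrative usage (a sketch, not new API): a full 64-bit register
 * value can be written via the two 32-bit halves, as done for
 * dtx_control and mdio_control later in this file:
 *
 *	__hal_pio_mem_write32_upper(pdev, regh, (u32)(val64 >> 32), reg);
 *	__hal_pio_mem_write32_lower(pdev, regh, (u32)val64, reg);
 *	xge_os_wmb();
 */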
110 
/*
 * __hal_device_register_poll
 * @hldev: pointer to xge_hal_device_t structure
 * @reg: register to poll
 * @op: 0 - wait for the masked bits to clear, 1 - wait for them to set
 * @mask: mask for logical "and" condition based on %op
 * @max_millis: maximum time to poll, in milliseconds
 *
 * Polls the given register until the masked bits reach the state
 * requested by %op, or until @max_millis milliseconds have elapsed.
 */
122 xge_hal_status_e
123 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
124 			   int op, u64 mask, int max_millis)
125 {
126 	u64 val64;
127 	int i = 0;
128 	xge_hal_status_e ret = XGE_HAL_FAIL;
129 
130 	xge_os_udelay(10);
131 
132 	do {
133 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
134 		if (op == 0 && !(val64 & mask))
135 			return XGE_HAL_OK;
136 		else if (op == 1 && (val64 & mask) == mask)
137 			return XGE_HAL_OK;
138 		xge_os_udelay(100);
139 	} while (++i <= 9);
140 
141 	do {
142 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
143 		if (op == 0 && !(val64 & mask))
144 			return XGE_HAL_OK;
145 		else if (op == 1 && (val64 & mask) == mask)
146 			return XGE_HAL_OK;
147 		xge_os_udelay(1000);
148 	} while (++i < max_millis);
149 
150 	return ret;
151 }
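
/*
 * Illustrative usage (sketch): waiting for a command-memory strobe bit
 * to clear, as done by the TTI/RTI configuration code below:
 *
 *	if (__hal_device_register_poll(hldev, &bar0->tti_command_mem, 0,
 *		XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
 *		XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
 *		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
 *	}
 */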
152 
/*
 * __hal_device_wait_quiescent
 * @hldev: the device
 * @hw_status: hw_status returned in case of error
 *
 * Wait until the device reports quiescent state for the relevant
 * hardware blocks.
 */
160 static xge_hal_status_e
161 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
162 {
163 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
164 
165 	/* poll and wait first */
166 #ifdef XGE_HAL_HERC_EMULATION
167 	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
168 			(XGE_HAL_ADAPTER_STATUS_TDMA_READY |
169 			 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
170 			 XGE_HAL_ADAPTER_STATUS_PFC_READY |
171 			 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
172 			 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
173 			 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
174 			 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
175 			 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
176 			 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
177 #else
178 	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
179 			(XGE_HAL_ADAPTER_STATUS_TDMA_READY |
180 			 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
181 			 XGE_HAL_ADAPTER_STATUS_PFC_READY |
182 			 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
183 			 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
184 			 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
185 			 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
186 			 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
187 			 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
188 			 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
189 #endif
190 
191 	return xge_hal_device_status(hldev, hw_status);
192 }
193 
/**
 * xge_hal_device_is_slot_freeze
 * @devh: the device
 *
 * Returns non-zero if the slot is frozen.
 * The determination is made based on the adapter_status
 * register, which never reads as all FFs unless the PCI read
 * cannot go through.
 */
203 int
204 xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
205 {
206 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
207 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
208 	u16 device_id;
209 	u64 adapter_status =
210 		xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
211 				      &bar0->adapter_status);
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
213 			xge_offsetof(xge_hal_pci_config_le_t, device_id),
214 			&device_id);
215 #ifdef TX_DEBUG
216 	if (adapter_status == XGE_HAL_ALL_FOXES)
217 	{
218 		u64 dummy;
219 		dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
220 						&bar0->pcc_enable);
221 		printf(">>> Slot is frozen!\n");
222 		brkpoint(0);
223 	}
224 #endif
225 	return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
226 }
227 
228 
/*
 * __hal_device_led_actifity_fix
 * @hldev: pointer to xge_hal_device_t structure
 *
 * SXE-002: Configure the link and activity LED to turn it off.
 */
235 static void
236 __hal_device_led_actifity_fix(xge_hal_device_t *hldev)
237 {
238 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
239 	u16 subid;
240 	u64 val64;
241 
242 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
243 		xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
244 
	/*
	 *  Herc adds a new register, beacon control, that was not present
	 *  in Xena. The beacon control register in Herc sits at the same
	 *  offset as the gpio control register in Xena, so on Xena the two
	 *  are one and the same. The gpio control register offset, however,
	 *  differs between Herc and Xena.
	 *  The current register map represents Herc (i.e. it contains both
	 *  the beacon and the gpio control registers).
	 *  When moving from Xena to Herc, any Xena code that used the gpio
	 *  control register for LED handling has to use the beacon control
	 *  register on Herc, while the remaining gpio control users keep
	 *  using the same register on Herc.
	 *  For the LED handling below, the beacon control register must be
	 *  used. This applies to Xena as well, since at this offset it is
	 *  the gpio control register.
	 */
263 	if ((subid & 0xFF) >= 0x07) {
264 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
265 		                            &bar0->beacon_control);
266 		val64 |= 0x0000800000000000ULL;
267 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
268 				     val64, &bar0->beacon_control);
269 		val64 = 0x0411040400000000ULL;
270 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
271 				    (void *) ((u8 *)bar0 + 0x2700));
272 	}
273 }
274 
/* Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
278 static u64 xena_fix_mac[] = {
279 	0x0060000000000000ULL, 0x0060600000000000ULL,
280 	0x0040600000000000ULL, 0x0000600000000000ULL,
281 	0x0020600000000000ULL, 0x0060600000000000ULL,
282 	0x0020600000000000ULL, 0x0060600000000000ULL,
283 	0x0020600000000000ULL, 0x0060600000000000ULL,
284 	0x0020600000000000ULL, 0x0060600000000000ULL,
285 	0x0020600000000000ULL, 0x0060600000000000ULL,
286 	0x0020600000000000ULL, 0x0060600000000000ULL,
287 	0x0020600000000000ULL, 0x0060600000000000ULL,
288 	0x0020600000000000ULL, 0x0060600000000000ULL,
289 	0x0020600000000000ULL, 0x0060600000000000ULL,
290 	0x0020600000000000ULL, 0x0060600000000000ULL,
291 	0x0020600000000000ULL, 0x0000600000000000ULL,
292 	0x0040600000000000ULL, 0x0060600000000000ULL,
293 	END_SIGN
294 };
295 
/*
 * __hal_device_xena_fix_mac
 * @hldev: HAL device handle.
 *
 * Fix for the all-"FFs" MAC address problem observed on Alpha platforms.
 */
302 static void
303 __hal_device_xena_fix_mac(xge_hal_device_t *hldev)
304 {
305 	int i = 0;
306 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
307 
	/*
	 *  Herc adds a new register, beacon control, that was not present
	 *  in Xena. The beacon control register in Herc sits at the same
	 *  offset as the gpio control register in Xena, so on Xena the two
	 *  are one and the same. The gpio control register offset, however,
	 *  differs between Herc and Xena.
	 *  The current register map represents Herc (i.e. it contains both
	 *  the beacon and the gpio control registers).
	 *  When moving from Xena to Herc, any Xena code that used the gpio
	 *  control register for LED handling has to use the beacon control
	 *  register on Herc, while the remaining gpio control users keep
	 *  using the same register on Herc.
	 *  In the code below (xena_fix_mac), the beacon control register
	 *  must be used on Xena, since at this offset it is the gpio
	 *  control register. On Herc no change is required.
	 */
326 	while (xena_fix_mac[i] != END_SIGN) {
327 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
328 				xena_fix_mac[i++], &bar0->beacon_control);
329 		xge_os_mdelay(1);
330 	}
331 }
332 
/*
 * xge_hal_device_bcast_enable
 * @devh: HAL device handle.
 *
 * Enable reception of broadcast frames.
 * The host must write the RMAC_CFG_KEY "key" register first,
 * and then the MAC_CFG register.
 */
341 void
342 xge_hal_device_bcast_enable(xge_hal_device_h devh)
343 {
344 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
345 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
346 	u64 val64;
347 
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                              &bar0->mac_cfg);
	val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
351 
352 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
353 		XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
354 
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
356 		(u32)(val64 >> 32), &bar0->mac_cfg);
357 
358 	xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
359 		(unsigned long long)val64,
360 		hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
361 }
362 
/*
 * xge_hal_device_bcast_disable
 * @devh: HAL device handle.
 *
 * Disable reception of broadcast frames.
 * The host must write the RMAC_CFG_KEY "key" register first,
 * and then the MAC_CFG register.
 */
371 void
372 xge_hal_device_bcast_disable(xge_hal_device_h devh)
373 {
374 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
375 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
376 	u64 val64;
377 
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                              &bar0->mac_cfg);
380 
381 	val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
382 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
383 		     XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
384 
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
386 		    (u32)(val64 >> 32), &bar0->mac_cfg);
387 
388 	xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
389 		(unsigned long long)val64,
390 		hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
391 }
392 
/*
 * __hal_device_shared_splits_configure
 * @hldev: HAL device handle.
 *
 * TxDMA stops issuing read requests if the number of outstanding
 * read splits exceeds the limit set by shared_splits.
 */
400 static void
401 __hal_device_shared_splits_configure(xge_hal_device_t *hldev)
402 {
403 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
404 	u64 val64;
405 
406 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
407 	                            &bar0->pic_control);
	val64 |= XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
410 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
411 			     &bar0->pic_control);
412 	xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
413 }
414 
/*
 * __hal_device_rmac_padding_configure
 * @hldev: HAL device handle.
 *
 * Configure RMAC frame padding. Depending on the configuration,
 * the padding is either sent to the host or stripped by the MAC.
 */
422 static void
423 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
424 {
425 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
426 	u64 val64;
427 
428 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
429 		    XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                              &bar0->mac_cfg);
432 	val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
433 	val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
434 	val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
435 
436 	/*
437 	 * If the RTH enable bit is not set, strip the FCS
438 	 */
439 	if (!hldev->config.rth_en ||
440 	    !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
441 			   &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
442 		val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
443 	}
444 
445 	val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
446 	val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
447 
448 	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
449 		    (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
450 	xge_os_mdelay(1);
451 
452 	xge_debug_device(XGE_TRACE,
453 		  "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
454 		  (unsigned long long)val64);
455 }
456 
/*
 * __hal_device_pause_frames_configure
 * @hldev: HAL device handle.
 *
 * Set the pause thresholds.
 *
 * A pause frame is generated when the amount of data outstanding
 * on any queue exceeds the ratio
 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256.
 */
467 static void
468 __hal_device_pause_frames_configure(xge_hal_device_t *hldev)
469 {
470 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
471 	int i;
472 	u64 val64;
473 
474 	switch (hldev->config.mac.media) {
475 		case XGE_HAL_MEDIA_SR:
476 		case XGE_HAL_MEDIA_SW:
477 			val64=0xfffbfffbfffbfffbULL;
478 			break;
479 		case XGE_HAL_MEDIA_LR:
480 		case XGE_HAL_MEDIA_LW:
481 			val64=0xffbbffbbffbbffbbULL;
482 			break;
483 		case XGE_HAL_MEDIA_ER:
484 		case XGE_HAL_MEDIA_EW:
485 		default:
486 			val64=0xffbbffbbffbbffbbULL;
487 			break;
488 	}
489 
490 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
491 			val64, &bar0->mc_pause_thresh_q0q3);
492 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
493 			val64, &bar0->mc_pause_thresh_q4q7);
494 
	/* Set the time value to be inserted in the pause frame generated
496 	 * by Xframe */
497 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
498 	                            &bar0->rmac_pause_cfg);
499 	if (hldev->config.mac.rmac_pause_gen_en)
500 		val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
501 	else
502 		val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
503 	if (hldev->config.mac.rmac_pause_rcv_en)
504 		val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
505 	else
506 		val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
507 	val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
508 	val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
509 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
510 			     &bar0->rmac_pause_cfg);
511 
512 	val64 = 0;
513 	for (i = 0; i<4; i++) {
514 		val64 |=
515 		     (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
516 							<<(i*2*8));
517 	}
518 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
519 			     &bar0->mc_pause_thresh_q0q3);
520 
521 	val64 = 0;
522 	for (i = 0; i<4; i++) {
523 		val64 |=
524 		     (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
525 							<<(i*2*8));
526 	}
527 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
528 			     &bar0->mc_pause_thresh_q4q7);
529 	xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
530 }
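
/*
 * Worked example (values assumed purely for illustration): given the
 * ratio documented above, a per-queue threshold byte of 0xF8 in
 * mc_pause_threshold_q0q3 corresponds to 0xF8/256 = 248/256, i.e. pause
 * is requested once roughly 97% of that queue's data is outstanding.
 */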
531 
/*
 * Herc's clock rate is doubled, unless the slot runs at 33MHz.
 */
535 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
536 				      unsigned int time_ival)
537 {
538 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
539 		return time_ival;
540 
541 	xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
542 
543 	if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
544 	    hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
545 		time_ival *= 2;
546 
547 	return time_ival;
548 }
549 
550 
551 /*
552  * __hal_device_bus_master_disable
553  * @hldev: HAL device handle.
554  *
555  * Disable bus mastership.
556  */
557 static void
558 __hal_device_bus_master_disable (xge_hal_device_t *hldev)
559 {
560 	u16 cmd;
561 	u16 bus_master = 4;
562 
563 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
564 			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
565 	cmd &= ~bus_master;
566 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
567 			 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
568 }
569 
/*
 * __hal_device_bus_master_enable
 * @hldev: HAL device handle.
 *
 * Enable bus mastership.
 */
576 static void
577 __hal_device_bus_master_enable (xge_hal_device_t *hldev)
578 {
579 	u16 cmd;
580 	u16 bus_master = 4;
581 
582 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
583 			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
584 
585 	/* already enabled? do nothing */
586 	if (cmd & bus_master)
587 		return;
588 
589 	cmd |= bus_master;
590 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
591 			 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}

/*
594  * __hal_device_intr_mgmt
595  * @hldev: HAL device handle.
596  * @mask: mask indicating which Intr block must be modified.
597  * @flag: if true - enable, otherwise - disable interrupts.
598  *
599  * Disable or enable device interrupts. Mask is used to specify
600  * which hardware blocks should produce interrupts. For details
601  * please refer to Xframe User Guide.
602  */
603 static void
604 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
605 {
606 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
607 	u64 val64 = 0, temp64 = 0;
608 	u64 gim, gim_saved;
609 
610 	gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
611                               hldev->regh0, &bar0->general_int_mask);
612 
613 	/* Top level interrupt classification */
614 	/* PIC Interrupts */
615 	if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
616 		/* Enable PIC Intrs in the general intr mask register */
617 		val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
618 		if (flag) {
619 			gim &= ~((u64) val64);
620 			temp64 = xge_os_pio_mem_read64(hldev->pdev,
621 					hldev->regh0, &bar0->pic_int_mask);
622 
623 			temp64 &= ~XGE_HAL_PIC_INT_TX;
624 #ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
625 			if (xge_hal_device_check_id(hldev) ==
626 							XGE_HAL_CARD_HERC) {
627 				temp64 &= ~XGE_HAL_PIC_INT_MISC;
628 			}
629 #endif
630 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
631 					     temp64, &bar0->pic_int_mask);
632 #ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
633 			if (xge_hal_device_check_id(hldev) ==
634 							XGE_HAL_CARD_HERC) {
635 				/*
636 				 * Unmask only Link Up interrupt
637 				 */
638 				temp64 = xge_os_pio_mem_read64(hldev->pdev,
639 					hldev->regh0, &bar0->misc_int_mask);
640 				temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
641 				xge_os_pio_mem_write64(hldev->pdev,
642 					      hldev->regh0, temp64,
643 					      &bar0->misc_int_mask);
644 				xge_debug_device(XGE_TRACE,
645 					"unmask link up flag "XGE_OS_LLXFMT,
646 					(unsigned long long)temp64);
647 			}
648 #endif
649 		} else { /* flag == 0 */
650 
651 #ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
652 			if (xge_hal_device_check_id(hldev) ==
653 							XGE_HAL_CARD_HERC) {
654 				/*
655 				 * Mask both Link Up and Down interrupts
656 				 */
657 				temp64 = xge_os_pio_mem_read64(hldev->pdev,
658 					hldev->regh0, &bar0->misc_int_mask);
659 				temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
660 				temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
661 				xge_os_pio_mem_write64(hldev->pdev,
662 					      hldev->regh0, temp64,
663 					      &bar0->misc_int_mask);
664 				xge_debug_device(XGE_TRACE,
665 					"mask link up/down flag "XGE_OS_LLXFMT,
666 					(unsigned long long)temp64);
667 			}
668 #endif
669 			/* Disable PIC Intrs in the general intr mask
670 			 * register */
671 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
672 					     XGE_HAL_ALL_INTRS_DIS,
673 			                     &bar0->pic_int_mask);
674 			gim |= val64;
675 		}
676 	}
677 
678 	/*  DMA Interrupts */
679 	/*  Enabling/Disabling Tx DMA interrupts */
680 	if (mask & XGE_HAL_TX_DMA_INTR) {
681 		/*  Enable TxDMA Intrs in the general intr mask register */
682 		val64 = XGE_HAL_TXDMA_INT_M;
683 		if (flag) {
684 			gim &= ~((u64) val64);
685 			/* Disable all TxDMA interrupts */
686 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
687 					     XGE_HAL_ALL_INTRS_DIS,
688 				             &bar0->txdma_int_mask);
689 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
690 					     XGE_HAL_ALL_INTRS_DIS,
691 				             &bar0->pfc_err_mask);
692 
693 		} else { /* flag == 0 */
694 
695 			/*  Disable TxDMA Intrs in the general intr mask
696 			 *  register */
697 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
698 					     XGE_HAL_ALL_INTRS_DIS,
699 			                     &bar0->txdma_int_mask);
700 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
701 					     XGE_HAL_ALL_INTRS_DIS,
702 			                     &bar0->pfc_err_mask);
703 
704 			gim |= val64;
705 		}
706 	}
707 
708 	/*  Enabling/Disabling Rx DMA interrupts */
709 	if (mask & XGE_HAL_RX_DMA_INTR) {
710 		/*  Enable RxDMA Intrs in the general intr mask register */
711 		val64 = XGE_HAL_RXDMA_INT_M;
712 		if (flag) {
713 
714 			gim &= ~((u64) val64);
715 			/* All RxDMA block interrupts are disabled for now
716 			 * TODO */
717 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
718 					     XGE_HAL_ALL_INTRS_DIS,
719 			                     &bar0->rxdma_int_mask);
720 
721 		} else { /* flag == 0 */
722 
723 			/*  Disable RxDMA Intrs in the general intr mask
724 			 *  register */
725 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
726 					     XGE_HAL_ALL_INTRS_DIS,
727 			                     &bar0->rxdma_int_mask);
728 
729 			gim |= val64;
730 		}
731 	}
732 
733 	/*  MAC Interrupts */
734 	/*  Enabling/Disabling MAC interrupts */
735 	if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
736 		val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
737 		if (flag) {
738 
739 			gim &= ~((u64) val64);
740 
			/* All MAC block error interrupts are disabled for now. */
742 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
743 			     XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
744 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
745 			     XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
746 
747 		} else { /* flag == 0 */
748 
749 			/* Disable MAC Intrs in the general intr mask
750 			 * register */
751 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
752 			     XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
753 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
754 			     XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
755 
756 			gim |= val64;
757 		}
758 	}
759 
760 	/*  XGXS Interrupts */
761 	if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
762 		val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
763 		if (flag) {
764 
765 			gim &= ~((u64) val64);
766 			/* All XGXS block error interrupts are disabled for now
767 			 * TODO */
768 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
769 			     XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
770 
771 		} else { /* flag == 0 */
772 
			/* Disable XGXS Intrs in the general intr mask register */
774 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
775 				XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
776 
777 			gim |= val64;
778 		}
779 	}
780 
781 	/*  Memory Controller(MC) interrupts */
782 	if (mask & XGE_HAL_MC_INTR) {
783 		val64 = XGE_HAL_MC_INT_M;
784 		if (flag) {
785 
786 			gim &= ~((u64) val64);
787 
788 			/* Enable all MC blocks error interrupts */
789 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
790 				     0x0ULL, &bar0->mc_int_mask);
791 
792 		} else { /* flag == 0 */
793 
794 			/* Disable MC Intrs in the general intr mask
795 			 * register */
796 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
797 				     XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
798 
799 			gim |= val64;
800 		}
801 	}
802 
803 
804 	/*  Tx traffic interrupts */
805 	if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
806 		val64 = XGE_HAL_TXTRAFFIC_INT_M;
807 		if (flag) {
808 
809 			gim &= ~((u64) val64);
810 
811 			/* Enable all the Tx side interrupts */
812 			/* '0' Enables all 64 TX interrupt levels. */
813 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
814 			                    &bar0->tx_traffic_mask);
815 
816 		} else { /* flag == 0 */
817 
818 			/* Disable Tx Traffic Intrs in the general intr mask
819 			 * register. */
820 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
821 			                     XGE_HAL_ALL_INTRS_DIS,
822 			                     &bar0->tx_traffic_mask);
823 			gim |= val64;
824 		}
825 	}
826 
827 	/*  Rx traffic interrupts */
828 	if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
829 		val64 = XGE_HAL_RXTRAFFIC_INT_M;
830 		if (flag) {
831 			gim &= ~((u64) val64);
832 			/* '0' Enables all 8 RX interrupt levels. */
833 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
834 			                    &bar0->rx_traffic_mask);
835 
836 		} else { /* flag == 0 */
837 
838 			/* Disable Rx Traffic Intrs in the general intr mask
839 			 * register.
840 			 */
841 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
842 			                     XGE_HAL_ALL_INTRS_DIS,
843 			                     &bar0->rx_traffic_mask);
844 
845 			gim |= val64;
846 		}
847 	}
848 
849 	/* Sched Timer interrupt */
850 	if (mask & XGE_HAL_SCHED_INTR) {
851 		if (flag) {
852 			temp64 = xge_os_pio_mem_read64(hldev->pdev,
853 					hldev->regh0, &bar0->txpic_int_mask);
854 			temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
855 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
856 					temp64, &bar0->txpic_int_mask);
857 
858 			xge_hal_device_sched_timer(hldev,
859 					hldev->config.sched_timer_us,
860 					hldev->config.sched_timer_one_shot);
861 		} else {
862 			temp64 = xge_os_pio_mem_read64(hldev->pdev,
863 					hldev->regh0, &bar0->txpic_int_mask);
864 			temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
865 
866 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
867 					temp64, &bar0->txpic_int_mask);
868 
869 			xge_hal_device_sched_timer(hldev,
870 					XGE_HAL_SCHED_TIMER_DISABLED,
871 					XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
872 		}
873 	}
874 
875 	if (gim != gim_saved) {
876 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
877 			&bar0->general_int_mask);
878 		xge_debug_device(XGE_TRACE, "general_int_mask updated "
879 			 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
880 			(unsigned long long)gim_saved, (unsigned long long)gim);
881 	}
882 }
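
/*
 * Illustrative usage (sketch): unmasking only the Tx and Rx traffic
 * interrupts would look like:
 *
 *	__hal_device_intr_mgmt(hldev,
 *		XGE_HAL_TX_TRAFFIC_INTR | XGE_HAL_RX_TRAFFIC_INTR, 1);
 */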
883 
884 /*
885  * __hal_device_bimodal_configure
886  * @hldev: HAL device handle.
887  *
888  * Bimodal parameters initialization.
889  */
890 static void
891 __hal_device_bimodal_configure(xge_hal_device_t *hldev)
892 {
893 	int i;
894 
895 	for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
896 		xge_hal_tti_config_t *tti;
897 		xge_hal_rti_config_t *rti;
898 
899 		if (!hldev->config.ring.queue[i].configured)
900 			continue;
901 		rti = &hldev->config.ring.queue[i].rti;
902 		tti = &hldev->bimodal_tti[i];
903 
904 		tti->enabled = 1;
905 		tti->urange_a = hldev->bimodal_urange_a_en * 10;
906 		tti->urange_b = 20;
907 		tti->urange_c = 30;
908 		tti->ufc_a = hldev->bimodal_urange_a_en * 8;
909 		tti->ufc_b = 16;
910 		tti->ufc_c = 32;
911 		tti->ufc_d = 64;
912 		tti->timer_val_us = hldev->bimodal_timer_val_us;
913 		tti->timer_ac_en = 1;
914 		tti->timer_ci_en = 0;
915 
916 		rti->urange_a = 10;
917 		rti->urange_b = 20;
918 		rti->urange_c = 30;
919 		rti->ufc_a = 1; /* <= for netpipe type of tests */
920 		rti->ufc_b = 4;
921 		rti->ufc_c = 4;
922 		rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
923 		rti->timer_ac_en = 1;
924 		rti->timer_val_us = 5; /* for optimal bus efficiency usage */
925 	}
926 }
927 
/*
 * __hal_device_tti_apply
 * @hldev: HAL device handle.
 * @tti: TTI configuration to apply.
 * @num: TTI memory entry number.
 * @runtime: non-zero when invoked at runtime (uses the ISR BAR0 mapping).
 *
 * Apply a single TTI configuration entry.
 */
934 static xge_hal_status_e
935 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
936 		       int num, int runtime)
937 {
938 	u64 val64, data1 = 0, data2 = 0;
939 	xge_hal_pci_bar0_t *bar0;
940 
941 	if (runtime)
942 		bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
943 	else
944 		bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
945 
946 	if (tti->timer_val_us) {
947 		unsigned int tx_interval;
948 
949 		if (hldev->config.pci_freq_mherz) {
950 			tx_interval = hldev->config.pci_freq_mherz *
951 					tti->timer_val_us / 64;
952 			tx_interval =
953 				__hal_fix_time_ival_herc(hldev,
954 							 tx_interval);
955 		} else {
956 			tx_interval = tti->timer_val_us;
957 		}
958 		data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
959 		if (tti->timer_ac_en) {
960 			data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
961 		}
962 		if (tti->timer_ci_en) {
963 			data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
964 		}
965 
966 		if (!runtime) {
967 			xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
968 				  num, tx_interval, tti->timer_ci_en ?
969 				  "enabled": "disabled");
970 		}
971 	}
972 
973 	if (tti->urange_a ||
974 	    tti->urange_b ||
975 	    tti->urange_c ||
976 	    tti->ufc_a ||
977 	    tti->ufc_b ||
978 	    tti->ufc_c ||
979 	    tti->ufc_d ) {
980 		data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
981 			 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
982 			 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
983 
984 		data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
985 			 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
986 			 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
987 			 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
988 	}
989 
990 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
991 			     &bar0->tti_data1_mem);
992 	(void)xge_os_pio_mem_read64(hldev->pdev,
993 		  hldev->regh0, &bar0->tti_data1_mem);
994 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
995 			     &bar0->tti_data2_mem);
996 	(void)xge_os_pio_mem_read64(hldev->pdev,
997 		  hldev->regh0, &bar0->tti_data2_mem);
998 	xge_os_wmb();
999 
1000 	val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
1001 	      XGE_HAL_TTI_CMD_MEM_OFFSET(num);
1002 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1003 		&bar0->tti_command_mem);
1004 
1005 	if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
1006 		   0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
1007 		   XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* the upper layer may need to retry */
1009 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1010 	}
1011 
1012 	if (!runtime) {
1013 		xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
1014 		   XGE_OS_LLXFMT, num,
1015 		   (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1016 		   hldev->regh0, &bar0->tti_data1_mem));
1017 	}
1018 
1019 	return XGE_HAL_OK;
1020 }
1021 
1022 /*
1023  * __hal_device_tti_configure
1024  * @hldev: HAL device handle.
1025  *
1026  * TTI Initialization.
1027  * Initialize Transmit Traffic Interrupt Scheme.
1028  */
1029 static xge_hal_status_e
1030 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
1031 {
1032 	int i;
1033 
1034 	for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
1035 		int j;
1036 
1037 		if (!hldev->config.fifo.queue[i].configured)
1038 			continue;
1039 
1040 		for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
1041 			xge_hal_status_e status;
1042 
1043 			if (!hldev->config.fifo.queue[i].tti[j].enabled)
1044 				continue;
1045 
1046 			/* at least some TTI enabled. Record it. */
1047 			hldev->tti_enabled = 1;
1048 
1049 			status = __hal_device_tti_apply(hldev,
1050 				&hldev->config.fifo.queue[i].tti[j],
1051 				i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
1052 			if (status != XGE_HAL_OK)
1053 				return status;
1054 		}
1055 	}
1056 
1057 	/* processing bimodal TTIs */
1058 	for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1059 		xge_hal_status_e status;
1060 
1061 		if (!hldev->bimodal_tti[i].enabled)
1062 			continue;
1063 
1064 		/* at least some bimodal TTI enabled. Record it. */
1065 		hldev->tti_enabled = 1;
1066 
1067 		status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
1068 				XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
1069 		if (status != XGE_HAL_OK)
1070 			return status;
1071 
1072 	}
1073 
1074 	return XGE_HAL_OK;
1075 }
1076 
1077 /*
1078  * __hal_device_rti_configure
1079  * @hldev: HAL device handle.
1080  *
1081  * RTI Initialization.
1082  * Initialize Receive Traffic Interrupt Scheme.
1083  */
1084 xge_hal_status_e
1085 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
1086 {
1087 	xge_hal_pci_bar0_t *bar0;
1088 	u64 val64, data1 = 0, data2 = 0;
1089 	int i;
1090 
1091 	if (runtime) {
1092 		/*
		 * We don't want to reconfigure RTI when bimodal
		 * interrupts are in use. Instead, reconfigure TTI
		 * with the new RTI values.
1096 		 */
1097 		if (hldev->config.bimodal_interrupts) {
1098 			__hal_device_bimodal_configure(hldev);
1099 			return __hal_device_tti_configure(hldev, 1);
1100 		}
1101 		bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
1102 	} else
1103 		bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1104 
1105 	for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1106 		xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
1107 
1108 		if (!hldev->config.ring.queue[i].configured)
1109 			continue;
1110 
1111 		if (rti->timer_val_us) {
1112 			unsigned int rx_interval;
1113 
1114 			if (hldev->config.pci_freq_mherz) {
1115 				rx_interval = hldev->config.pci_freq_mherz *
1116 						rti->timer_val_us / 8;
1117 				rx_interval =
1118 					__hal_fix_time_ival_herc(hldev,
1119 								 rx_interval);
1120 			} else {
1121 				rx_interval = rti->timer_val_us;
1122 			}
1123 			data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
1124 			if (rti->timer_ac_en) {
1125 				data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
1126 			}
1127 			data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
1128 		}
1129 
1130 		if (rti->urange_a ||
1131 		    rti->urange_b ||
1132 		    rti->urange_c ||
1133 		    rti->ufc_a ||
1134 		    rti->ufc_b ||
1135 		    rti->ufc_c ||
1136 		    rti->ufc_d) {
1137 			data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
1138 				XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
1139 				XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
1140 
1141 			data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
1142 				 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
1143 				 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
1144 				 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
1145 		}
1146 
1147 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1148 				     &bar0->rti_data1_mem);
1149 		(void)xge_os_pio_mem_read64(hldev->pdev,
1150 			  hldev->regh0, &bar0->rti_data1_mem);
1151 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1152 			             &bar0->rti_data2_mem);
1153 		(void)xge_os_pio_mem_read64(hldev->pdev,
1154 			  hldev->regh0, &bar0->rti_data2_mem);
1155 		xge_os_wmb();
1156 
1157 		val64 = XGE_HAL_RTI_CMD_MEM_WE |
1158 		XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
1159 		val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
1160 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1161 		                    &bar0->rti_command_mem);
1162 
1163 		if (!runtime && __hal_device_register_poll(hldev,
1164 			&bar0->rti_command_mem, 0,
1165 			XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
1166 			XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
			/* the upper layer may need to retry */
1168 			return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1169 		}
1170 
1171 		if (!runtime) {
1172 			xge_debug_device(XGE_TRACE,
1173 			  "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
1174 			  i,
1175 			  (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1176 					  hldev->regh0, &bar0->rti_data1_mem));
1177 		}
1178 	}
1179 
1180 	return XGE_HAL_OK;
1181 }
1182 
1183 
1184 /* Constants to be programmed into the Xena's registers to configure
1185  * the XAUI. */
1186 static u64 default_xena_mdio_cfg[] = {
1187 	/* Reset PMA PLL */
1188 	0xC001010000000000ULL, 0xC0010100000000E0ULL,
1189 	0xC0010100008000E4ULL,
1190 	/* Remove Reset from PMA PLL */
1191 	0xC001010000000000ULL, 0xC0010100000000E0ULL,
1192 	0xC0010100000000E4ULL,
1193 	END_SIGN
1194 };
1195 
1196 static u64 default_herc_mdio_cfg[] = {
1197 	END_SIGN
1198 };
1199 
1200 static u64 default_xena_dtx_cfg[] = {
1201 	0x8000051500000000ULL, 0x80000515000000E0ULL,
1202 	0x80000515D93500E4ULL, 0x8001051500000000ULL,
1203 	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
1204 	0x8002051500000000ULL, 0x80020515000000E0ULL,
1205 	0x80020515F21000E4ULL,
1206 	/* Set PADLOOPBACKN */
1207 	0x8002051500000000ULL, 0x80020515000000E0ULL,
1208 	0x80020515B20000E4ULL, 0x8003051500000000ULL,
1209 	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
1210 	0x8004051500000000ULL, 0x80040515000000E0ULL,
1211 	0x80040515B20000E4ULL, 0x8005051500000000ULL,
1212 	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
1213 	SWITCH_SIGN,
1214 	/* Remove PADLOOPBACKN */
1215 	0x8002051500000000ULL, 0x80020515000000E0ULL,
1216 	0x80020515F20000E4ULL, 0x8003051500000000ULL,
1217 	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
1218 	0x8004051500000000ULL, 0x80040515000000E0ULL,
1219 	0x80040515F20000E4ULL, 0x8005051500000000ULL,
1220 	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
1221 	END_SIGN
1222 };
1223 
1224 /*
1225 static u64 default_herc_dtx_cfg[] = {
1226 	0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
1227 	0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
1228 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1229 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1230 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1231 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1232 	END_SIGN
1233 };
1234 */
1235 
1236 static u64 default_herc_dtx_cfg[] = {
1237     0x8000051536750000ULL, 0x80000515367500E0ULL,
1238     0x8000051536750004ULL, 0x80000515367500E4ULL,
1239 
1240     0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1241     0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1242 
1243     0x801205150D440000ULL, 0x801205150D4400E0ULL,
1244     0x801205150D440004ULL, 0x801205150D4400E4ULL,
1245 
1246     0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1247     0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1248     END_SIGN
1249 };
1250 
1251 /*
1252  * __hal_device_xaui_configure
1253  * @hldev: HAL device handle.
1254  *
 * Configure the XAUI interface of Xena.
 *
 * To configure the Xena's XAUI, a series of 64-bit values must be
 * written into two registers in a particular sequence. The macro
 * 'SWITCH_SIGN' is therefore placed in the arrays of configuration
 * values (default_dtx_cfg & default_mdio_cfg) at the appropriate
 * places to switch writing from one register to the other. Values
 * are written until the 'END_SIGN' macro is encountered.
 * For example, after a series of 21 writes into the dtx_control
 * register the 'SWITCH_SIGN' appears, and writing then switches to
 * mdio_control until END_SIGN is encountered.
 */
1268 static void
1269 __hal_device_xaui_configure(xge_hal_device_t *hldev)
1270 {
1271 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1272 	int mdio_cnt = 0, dtx_cnt = 0;
1273 	u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
1274 
1275 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
1276 		default_dtx_cfg = default_xena_dtx_cfg;
1277 		default_mdio_cfg = default_xena_mdio_cfg;
1278 	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
1279 		default_dtx_cfg = default_herc_dtx_cfg;
1280 		default_mdio_cfg = default_herc_mdio_cfg;
1281 	} else
1282 		xge_assert(default_dtx_cfg);
1283 
1284 	do {
1285 	    dtx_cfg:
1286 		while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
1287 			if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
1288 				dtx_cnt++;
1289 				goto mdio_cfg;
1290 			}
1291 			__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1292 		                    (u32)(default_dtx_cfg[dtx_cnt]>>32),
1293 			            &bar0->dtx_control);
1294 			__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
1295 		                    (u32)default_dtx_cfg[dtx_cnt],
1296 			            &bar0->dtx_control);
1297 			xge_os_wmb();
1298 			xge_os_mdelay(1);
1299 			dtx_cnt++;
1300 		}
1301 	    mdio_cfg:
1302 		while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
1303 			if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
1304 				mdio_cnt++;
1305 				goto dtx_cfg;
1306 			}
1307 			__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1308 		                     (u32)(default_mdio_cfg[mdio_cnt]>>32),
1309 			             &bar0->mdio_control);
1310 			__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
1311 		                     (u32)default_mdio_cfg[mdio_cnt],
1312 			             &bar0->mdio_control);
1313 			xge_os_wmb();
1314 			xge_os_mdelay(1);
1315 			mdio_cnt++;
1316 		}
1317 	} while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
1318 		    (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
1319 
1320 	xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
1321 }
1322 
1323 /*
1324  * __hal_device_mac_link_util_set
1325  * @hldev: HAL device handle.
1326  *
1327  * Set sampling rate to calculate link utilization.
1328  */
1329 static void
1330 __hal_device_mac_link_util_set(xge_hal_device_t *hldev)
1331 {
1332 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1333 	u64 val64;
1334 
1335 	val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
1336 			hldev->config.mac.tmac_util_period) |
1337 		XGE_HAL_MAC_RX_LINK_UTIL_VAL(
1338 			hldev->config.mac.rmac_util_period);
1339 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1340 	                     &bar0->mac_link_util);
1341 	xge_debug_device(XGE_TRACE, "%s",
1342 			  "bandwidth link utilization configured");
1343 }
1344 
1345 /*
1346  * __hal_device_set_swapper
1347  * @hldev: HAL device handle.
1348  *
1349  * Set the Xframe's byte "swapper" in accordance with
1350  * endianness of the host.
1351  */
1352 xge_hal_status_e
1353 __hal_device_set_swapper(xge_hal_device_t *hldev)
1354 {
1355 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1356 	u64 val64;
1357 
1358 	/*
	 * From the 32-bit errata:
1360 	 *
1361 	 * The SWAPPER_CONTROL register determines how the adapter accesses
1362 	 * host memory as well as how it responds to read and write requests
1363 	 * from the host system. Writes to this register should be performed
1364 	 * carefully, since the byte swappers could reverse the order of bytes.
1365 	 * When configuring this register keep in mind that writes to the PIF
1366 	 * read and write swappers could reverse the order of the upper and
1367 	 * lower 32-bit words. This means that the driver may have to write
1368 	 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
1369 	 * configure the entire register. */
1370 
1371 	/*
	 * The device defaults to big-endian format, so a big-endian
	 * driver need not set anything.
1374 	 */
1375 
1376 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
1377 
1378 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1379 			0xffffffffffffffffULL, &bar0->swapper_ctrl);
1380 
1381 	val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
1382 
1383 	xge_os_wmb();
1384 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1385 			     &bar0->swapper_ctrl);
1386 
1387 	xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
1388 			(unsigned long long)val64);
1389 
1390 #elif !defined(XGE_OS_HOST_BIG_ENDIAN)
1391 
1392 	/*
1393 	 * Initially we enable all bits to make it accessible by the driver,
1394 	 * then we selectively enable only those bits that we want to set.
1395 	 * i.e. force swapper to swap for the first time since second write
1396 	 * will overwrite with the final settings.
1397 	 *
1398 	 * Use only for little endian platforms.
1399 	 */
1400 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1401 			0xffffffffffffffffULL, &bar0->swapper_ctrl);
1402 	xge_os_wmb();
1403 	val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
1404 		 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
1405 		 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
1406 		 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
1407 		 XGE_HAL_SWAPPER_CTRL_RTH_FE |
1408 		 XGE_HAL_SWAPPER_CTRL_RTH_SE |
1409 		 XGE_HAL_SWAPPER_CTRL_TXP_FE |
1410 		 XGE_HAL_SWAPPER_CTRL_TXP_SE |
1411 		 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
1412 		 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
1413 		 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
1414 		 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
1415 		 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
1416 		 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
1417 		 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
1418 		 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
1419 		 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
1420 		 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
1421 		 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
1422 		 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
1423 /*
1424 	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
1425 		 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
1426 	} */
1427 	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
1428 	                     &bar0->swapper_ctrl);
1429 	xge_os_wmb();
1430 	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1431 	                     &bar0->swapper_ctrl);
1432 	xge_os_wmb();
1433 	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1434 	                     &bar0->swapper_ctrl);
1435 	xge_debug_device(XGE_TRACE, "%s", "using little endian set");
1436 #endif
1437 
1438 	/*  Verifying if endian settings are accurate by reading a feedback
1439 	 *  register.  */
1440 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1441 	                            &bar0->pif_rd_swapper_fb);
1442 	if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
1443 		xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
1444 			  (unsigned long long) val64);
1445 		return XGE_HAL_ERR_SWAPPER_CTRL;
1446 	}
1447 
1448 	xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
1449 
1450 	return XGE_HAL_OK;
1451 }
1452 
1453 /*
1454  * __hal_device_rts_mac_configure - Configure RTS steering based on
1455  * destination mac address.
1456  * @hldev: HAL device handle.
1457  *
1458  */
1459 xge_hal_status_e
1460 __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
1461 {
1462 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1463 	u64 val64;
1464 
1465 	if (!hldev->config.rts_mac_en) {
1466 		return XGE_HAL_OK;
1467 	}
1468 
	/*
	 * Set the receive traffic steering mode from default (classic)
	 * to enhanced.
	 */
1473 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1474 					&bar0->rts_ctrl);
1475 	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1476 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1477 				val64, &bar0->rts_ctrl);
1478 	return XGE_HAL_OK;
1479 }
1480 
1481 /*
1482  * __hal_device_rts_qos_configure - Configure RTS steering based on
1483  * qos.
1484  * @hldev: HAL device handle.
1485  *
1486  */
1487 xge_hal_status_e
1488 __hal_device_rts_qos_configure(xge_hal_device_t *hldev)
1489 {
1490 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1491 	u64 val64;
1492 	int j;
1493 
1494 	if (!hldev->config.rts_qos_steering_config) {
1495 		return XGE_HAL_OK;
1496 	}
1497 
1498     /* First clear the RTS_DS_MEM_DATA */
1499     val64 = 0;
1500     for (j = 0; j < 64; j++ )
1501     {
1502         /* First clear the value */
1503         val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
1504 
1505         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1506 	                     &bar0->rts_ds_mem_data);
1507 
1508         val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
1509                 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
1510                 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
1511 
1512         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1513 	                     &bar0->rts_ds_mem_ctrl);
1514 
1515 
1516 		/* poll until done */
1517 		if (__hal_device_register_poll(hldev,
1518 		       &bar0->rts_ds_mem_ctrl, 0,
1519 		       XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
1520 		       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
			/* the upper layer may need to retry */
1522 			return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1523 		}
1524 
1525     }
1526     /* Check for enhanced mode */
1527 
1528 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1529 					&bar0->rts_ctrl);
1530 
1531     /* Check to see if QOS Steering is turned ON and adapter is in classic mode */
1532     if (!(val64 & XGE_HAL_RTS_CTRL_ENHANCED_MODE))
1533     {
1534         /* Set the priority calendar - hard coded as all rings should be enabled */
1535         val64 = 0x0706050407030602;
1536         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1537                              &bar0->rx_w_round_robin_0);
1538 
1539         val64 = 0x0507040601070503;
1540         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1541                              &bar0->rx_w_round_robin_1);
1542 
1543         val64 = 0x0604070205060700;
1544         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1545                              &bar0->rx_w_round_robin_2);
1546 
1547         val64 = 0x0403060705010207;
1548         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1549                              &bar0->rx_w_round_robin_3);
1550 
1551         val64 = 0x0604050300000000;
1552         xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1553                              &bar0->rx_w_round_robin_4);
1554 
1555     }
1556 	return XGE_HAL_OK;
1557 }
1558 
/*
 * xge_hal_device_rts_mac_enable
 *
 * @devh: HAL device handle.
 * @index: index number where the MAC addr will be stored
 * @macaddr: MAC address
 *
 * Enable RTS steering for the given MAC address. This function has to be
 * called with the lock acquired.
 *
 * NOTE:
 * 1. The ULD has to call this function with an index value that
 *    satisfies the following condition:
 *	ring_num = (index % 8)
 * 2. The ULD also needs to make sure that the index is not already
 *    occupied by a MAC address. If it is, the existing entry is
 *    overwritten; the HAL does not check for this.
 *
 */
1578 xge_hal_status_e
1579 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
1580 {
1581 	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1582 	xge_hal_status_e status;
1583 
1584 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1585 
1586 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1587 		max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1588 
1589 	if ( index >= max_addr )
1590 		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1591 
1592 	/*
1593 	 * Set the MAC address at the given location marked by index.
1594 	 */
1595 	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1596 	if (status != XGE_HAL_OK) {
1597 		xge_debug_device(XGE_ERR, "%s",
1598 			"Not able to set the mac addr");
1599 		return status;
1600 	}
1601 
1602 	return xge_hal_device_rts_section_enable(hldev, index);
1603 }
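
/*
 * Illustrative usage (sketch only): to steer frames matching a MAC
 * address to ring 3, the ULD picks an index whose value modulo 8
 * equals the ring number, e.g. index 11 (11 % 8 == 3), and calls:
 *
 *	status = xge_hal_device_rts_mac_enable(devh, 11, macaddr);
 */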
1604 
/*
 * xge_hal_device_rts_mac_disable
 * @devh: HAL device handle.
 * @index: index number of the MAC addr to disable
 *
 * Disable RTS steering for the given MAC address entry.
 * This function should be called with the lock acquired.
 *
 */
1614 xge_hal_status_e
1615 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
1616 {
1617 	xge_hal_status_e status;
1618 	u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1619 	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1620 
1621 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1622 
1623 	xge_debug_ll(XGE_TRACE, "the index value is %d \n", index);
1624 
1625 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1626 		max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1627 
1628 	if ( index >= max_addr )
1629 		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1630 
1631 	/*
1632 	 * Disable MAC address @ given index location
1633 	 */
1634 	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1635 	if (status != XGE_HAL_OK) {
1636 		xge_debug_device(XGE_ERR, "%s",
1637 			"Not able to set the mac addr");
1638 		return status;
1639 	}
1640 
1641 	return XGE_HAL_OK;
1642 }
1643 
1644 
/*
 * __hal_device_rth_it_configure - Configure RTH for the device
 * @hldev: HAL device handle.
 *
 * Uses the IT (Indirection Table).
 */
1651 xge_hal_status_e
1652 __hal_device_rth_it_configure(xge_hal_device_t *hldev)
1653 {
1654 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1655 	u64 val64;
1656 	int rings[XGE_HAL_MAX_RING_NUM]={0};
1657 	int rnum;
1658 	int rmax;
1659 	int buckets_num;
1660 	int bucket;
1661 
1662 	if (!hldev->config.rth_en) {
1663 		return XGE_HAL_OK;
1664 	}
1665 
1666 	/*
1667 	 * Set the receive traffic steering mode from default(classic)
1668 	 * to enhanced.
1669 	 */
1670 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1671 				      &bar0->rts_ctrl);
1672 	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1673 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1674 			       val64, &bar0->rts_ctrl);
1675 
1676 	buckets_num = (1 << hldev->config.rth_bucket_size);
1677 
1678 	rmax=0;
1679 	for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1680 		if (hldev->config.ring.queue[rnum].configured &&
1681 				hldev->config.ring.queue[rnum].rth_en)
1682 				rings[rmax++] = rnum;
1683     }
1684 
1685 	rnum = 0;
1686 	/* for starters: fill in all the buckets with rings "equally" */
1687 	for (bucket = 0; bucket < buckets_num; bucket++) {
1688 
		if (rnum == rmax)
			rnum = 0;
1691 
1692 		/* write data */
1693 		val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
1694 		        XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
1695 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1696 				     &bar0->rts_rth_map_mem_data);
1697 
1698 		/* execute */
1699 		val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
1700 			XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
1701 			XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
1702 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1703 				     &bar0->rts_rth_map_mem_ctrl);
1704 
1705 		/* poll until done */
1706 		if (__hal_device_register_poll(hldev,
1707 			&bar0->rts_rth_map_mem_ctrl, 0,
1708 			XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
1709 			XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1710 			return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1711 		}
1712 
		rnum++;
1714 	}
1715 
1716 	val64 = XGE_HAL_RTS_RTH_EN;
1717 	val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
	val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN |
		 XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV6_EN |
		 XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
		 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN |
		 XGE_HAL_RTS_RTH_IPV6_EX_EN;
1721 
1722 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1723 			     &bar0->rts_rth_cfg);
1724 
1725 	xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
1726 			  hldev->config.rth_bucket_size);
1727 
1728 	return XGE_HAL_OK;
1729 }
1730 
1731 
1732 /*
 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
 *
 * This function adds a new entry to the SPDM (Socket-Pair Direct Match)
 * table.
 *
 * Note:
 *   This function should be called with the spdm_lock held.
 *
 * See also: xge_hal_spdm_entry_add, xge_hal_spdm_entry_remove.
1743  */
1744 static xge_hal_status_e
1745 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
1746 		xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
1747 		u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
1748 {
1749 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1750 	u64 val64;
1751 	u64 spdm_line_arr[8];
1752 	u8 line_no;
1753 
1754 	/*
1755 	 * Clear the SPDM READY bit
1756 	 */
1757 	val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
1758 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1759 			       &bar0->rxpic_int_reg);
1760 
1761 	xge_debug_device(XGE_TRACE,
1762 			"L4 SP %x:DP %x: hash %x tgt_queue %d \n",
1763 			l4_sp, l4_dp, jhash_value, tgt_queue);
1764 
1765 	xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
1766 
1767 	/*
1768 	 * Construct the SPDM entry.
1769 	 */
1770 	spdm_line_arr[0] = vBIT(l4_sp,0,16) |
1771 			   vBIT(l4_dp,16,32) |
1772 			   vBIT(tgt_queue,53,3)	|
1773 			   vBIT(is_tcp,59,1) |
1774 			   vBIT(is_ipv4,63,1);
1775 
1776 
1777 	if (is_ipv4) {
1778 		spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
1779 				   vBIT(dst_ip->ipv4.addr,32,32);
1780 
1781 	} else {
1782 		xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
1783 		xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
1784 		xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
1785 		xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
1786 	}
1787 
1788 	spdm_line_arr[7] = vBIT(jhash_value,0,32) |
1789 				BIT(63);  /* entry enable bit */
1790 
1791 	/*
1792 	 * Add the entry to the SPDM table
1793 	 */
1794 	for(line_no = 0; line_no < 8; line_no++) {
1795 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1796 				spdm_line_arr[line_no],
1797 				(void *)((char *)hldev->spdm_mem_base +
1798 						(spdm_entry * 64) +
1799 						(line_no * 8)));
1800 	}
1801 
1802 	/*
1803 	 * Wait for the operation to be completed.
1804 	 */
1805 	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
1806 			XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
1807 			XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1808 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1809 	}
1810 
1811 	/*
	 * Add this information to a local SPDM table. The local copy is
	 * maintained so that entry lookups can be satisfied from host
	 * memory instead of searching the adapter SPDM table, which is
	 * costly in terms of time.
1816 	 */
1817 	hldev->spdm_table[spdm_entry]->in_use = 1;
1818 	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
1819 		    sizeof(xge_hal_ipaddr_t));
1820 	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
1821 		    sizeof(xge_hal_ipaddr_t));
1822 	hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
1823 	hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
1824 	hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
1825 	hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
1826 	hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
1827 	hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
1828 	hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
1829 
1830 	return XGE_HAL_OK;
1831 }
1832 
1833 /*
1834  * __hal_device_rth_spdm_configure - Configure RTH for the device
1835  * @hldev: HAL device handle.
1836  *
1837  * Using SPDM (Socket-Pair Direct Match).
1838  */
1839 xge_hal_status_e
1840 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
1841 {
1842 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
1843 	u64 val64;
1844 	u8 spdm_bar_num;
1845 	u32 spdm_bar_offset;
1846 	int spdm_table_size;
1847 	int i;
1848 
1849 	if (!hldev->config.rth_spdm_en) {
1850 		return XGE_HAL_OK;
1851 	}
1852 
1853 	/*
1854 	 * Retrieve the base address of SPDM Table.
1855 	 */
1856 	val64 = xge_os_pio_mem_read64(hldev->pdev,
1857 			hldev->regh0, &bar0->spdm_bir_offset);
1858 
1859 	spdm_bar_num	= XGE_HAL_SPDM_PCI_BAR_NUM(val64);
1860 	spdm_bar_offset	= XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
1861 
1862 
1863 	/*
	 * spdm_bar_num specifies the PCI BAR register used to address the
	 * memory space. spdm_bar_offset specifies the offset of the SPDM
	 * memory within that BAR's memory space.
1867 	 */
1868 	switch (spdm_bar_num) {
1869 		case 0:
1870 		{
1871 			hldev->spdm_mem_base = (char *)bar0 +
1872 						(spdm_bar_offset * 8);
1873 			break;
1874 		}
1875 		case 1:
1876 		{
1877 			char *bar1 = (char *)hldev->bar1;
1878 			hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
1879 			break;
1880 		}
1881 		default:
1882 			xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
1883 	}
1884 
1885 	/*
	 * Retrieve the size of the SPDM table (number of entries).
1887 	 */
1888 	val64 = xge_os_pio_mem_read64(hldev->pdev,
1889 			hldev->regh0, &bar0->spdm_structure);
1890 	hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
1891 
1892 
1893 	spdm_table_size = hldev->spdm_max_entries *
1894 					sizeof(xge_hal_spdm_entry_t);
1895 	if (hldev->spdm_table == NULL) {
1896 		void *mem;
1897 
1898 		/*
1899 		 * Allocate memory to hold the copy of SPDM table.
1900 		 */
1901 		if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
1902 					xge_os_malloc(
1903 					 hldev->pdev,
1904 					 (sizeof(xge_hal_spdm_entry_t *) *
1905 					 hldev->spdm_max_entries))) == NULL) {
1906 			return XGE_HAL_ERR_OUT_OF_MEMORY;
1907 		}
1908 
1909 		if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
1910 		{
1911 			xge_os_free(hldev->pdev, hldev->spdm_table,
1912 				  (sizeof(xge_hal_spdm_entry_t *) *
1913 					 hldev->spdm_max_entries));
1914 			return XGE_HAL_ERR_OUT_OF_MEMORY;
1915 		}
1916 
1917 		xge_os_memzero(mem, spdm_table_size);
1918 		for (i = 0; i < hldev->spdm_max_entries; i++) {
1919 			hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
1920 					((char *)mem +
1921 					 i * sizeof(xge_hal_spdm_entry_t));
1922 		}
1923 		xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
1924 	} else {
1925 		/*
1926 		 * We are here because the host driver tries to
1927 		 * do a soft reset on the device.
1928 		 * Since the device soft reset clears the SPDM table, copy
1929 		 * the entries from the local SPDM table to the actual one.
1930 		 */
1931 		xge_os_spin_lock(&hldev->spdm_lock);
1932 		for (i = 0; i < hldev->spdm_max_entries; i++) {
1933 			xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
1934 
1935 			if (spdm_entry->in_use) {
1936 				if (__hal_spdm_entry_add(hldev,
1937 							 &spdm_entry->src_ip,
1938 							 &spdm_entry->dst_ip,
1939 							 spdm_entry->l4_sp,
1940 							 spdm_entry->l4_dp,
1941 							 spdm_entry->is_tcp,
1942 							 spdm_entry->is_ipv4,
1943 							 spdm_entry->tgt_queue,
1944 							 spdm_entry->jhash_value,
1945 							 spdm_entry->spdm_entry)
1946 						!= XGE_HAL_OK) {
					/* Log a warning */
1948 					xge_debug_device(XGE_ERR,
1949 						"SPDM table update from local"
1950 						" memory failed");
1951 				}
1952 			}
1953 		}
1954 		xge_os_spin_unlock(&hldev->spdm_lock);
1955 	}
1956 
1957 	/*
	 * Set the receive traffic steering mode from default (classic)
1959 	 * to enhanced.
1960 	 */
1961 	val64 = xge_os_pio_mem_read64(hldev->pdev,
1962 				    hldev->regh0, &bar0->rts_ctrl);
1963 	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1964 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1965 			     val64, &bar0->rts_ctrl);
1966 
1967 	/*
1968 	 * We may not need to configure rts_rth_jhash_cfg register as the
1969 	 * default values are good enough to calculate the hash.
1970 	 */
1971 
1972 	/*
1973 	 * As of now, set all the rth mask registers to zero. TODO.
1974 	 */
1975 	for(i = 0; i < 5; i++) {
1976 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1977 				     0, &bar0->rts_rth_hash_mask[i]);
1978 	}
1979 
1980 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1981 			     0, &bar0->rts_rth_hash_mask_5);
1982 
1983 	if (hldev->config.rth_spdm_use_l4) {
1984 		val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
1985 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1986 				     val64, &bar0->rts_rth_status);
1987 	}
1988 
1989 	val64 = XGE_HAL_RTS_RTH_EN;
1990 	val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
1991 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1992 			     &bar0->rts_rth_cfg);
1993 
1994 
1995 	return XGE_HAL_OK;
1996 }
1997 
1998 /*
1999  * __hal_device_pci_init
2000  * @hldev: HAL device handle.
2001  *
2002  * Initialize certain PCI/PCI-X configuration registers
2003  * with recommended values. Save config space for future hw resets.
2004  */
2005 static void
2006 __hal_device_pci_init(xge_hal_device_t *hldev)
2007 {
2008 	int i, pcisize = 0;
2009 	u16 cmd = 0;
2010 	u8  val;
2011 
	/* Set the Parity Error Response and SERR# Enable bits in the
	 * PCI command register. */
2013 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2014 			xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
2015 	cmd |= 0x140;
2016 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2017 			 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
2018 
	/* Set user-specified value for the PCI Latency Timer */
2020 	if (hldev->config.latency_timer &&
2021 	    hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
2022 		xge_os_pci_write8(hldev->pdev, hldev->cfgh,
2023 	                 xge_offsetof(xge_hal_pci_config_le_t,
2024 	                 latency_timer),
2025 			 (u8)hldev->config.latency_timer);
2026 	}
2027 	/* Read back latency timer to reflect it into user level */
2028 	xge_os_pci_read8(hldev->pdev, hldev->cfgh,
2029 		xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
2030 	hldev->config.latency_timer = val;
2031 
2032 	/* Enable Data Parity Error Recovery in PCI-X command register. */
2033 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2034 		xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2035 	cmd |= 1;
2036 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2037 		 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2038 
2039 	/* Set MMRB count in PCI-X command register. */
2040 	if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
2041 		cmd &= 0xFFF3;
2042 		cmd |= hldev->config.mmrb_count << 2;
2043 		xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2044 		       xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2045 		       cmd);
2046 	}
2047 	/* Read back MMRB count to reflect it into user level */
2048 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2049 		        xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2050 		        &cmd);
2051 	cmd &= 0x000C;
2052 	hldev->config.mmrb_count = cmd>>2;
2053 
	/* Set maximum outstanding splits based on system type. */
2055 	if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS)  {
2056 		xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2057 			xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2058 			&cmd);
2059 		cmd &= 0xFF8F;
2060 		cmd |= hldev->config.max_splits_trans << 4;
2061 		xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2062 			xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2063 			cmd);
2064 	}
2065 
2066 	/* Read back max split trans to reflect it into user level */
2067 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2068 		xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2069 	cmd &= 0x0070;
2070 	hldev->config.max_splits_trans = cmd>>4;
2071 
2072 	/* Forcibly disabling relaxed ordering capability of the card. */
2073 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2074 		xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2075 	cmd &= 0xFFFD;
2076 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2077 		 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2078 
	/* Store PCI device ID and revision for future references wherein we
	 * decide the Xena revision using the PCI subsystem ID */
2081 	xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2082 			xge_offsetof(xge_hal_pci_config_le_t, device_id),
2083 			&hldev->device_id);
2084 	xge_os_pci_read8(hldev->pdev,hldev->cfgh,
2085 			xge_offsetof(xge_hal_pci_config_le_t, revision),
2086 			&hldev->revision);
2087 
2088 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
2089 		pcisize = XGE_HAL_PCISIZE_HERC;
2090 	else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2091 		pcisize = XGE_HAL_PCISIZE_XENA;
2092 
2093 	/* save PCI config space for future resets */
2094 	for (i = 0; i < pcisize; i++) {
2095 		xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2096 		                (u32*)&hldev->pci_config_space + i);
2097 	}
2098 
2099 #if defined(XGE_HAL_MSI)
	/* Upper limit on the number of MSIs enabled by the system */
2101 	xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2102 			xge_offsetof(xge_hal_pci_config_le_t, msi_control),
2103 			&hldev->msi_mask);
2104 	hldev->msi_mask &= 0x70;
2105 	if (!hldev->msi_mask)
2106 		return;
2107 	hldev->msi_mask >>= 4; /*
2108 				   * This number's power of 2 is the number
2109 				   * of MSIs enabled.
2110 				   */
2111 	hldev->msi_mask = (0x1 << hldev->msi_mask);
2112 	/*
2113 	 * NOTE:
2114 	 * If 32 MSIs are enabled, then MSI numbers range from 0 - 31.
2115 	 */
2116 	hldev->msi_mask -= 1;
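	/*
	 * Worked example (illustrative): if the multiple-message field read
	 * above is 0x30, then msi_mask >> 4 == 3, so 2^3 == 8 MSIs are
	 * enabled and the resulting mask is 8 - 1 == 7, i.e. MSI numbers
	 * 0..7 are valid.
	 */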
2117 #endif
2118 }
2119 
2120 /*
 * __hal_device_pci_info_get - Get PCI bus information such as width,
 *                             frequency and mode.
2123  * @devh: HAL device handle.
2124  * @pci_mode:		pointer to a variable of enumerated type
2125  *			xge_hal_pci_mode_e{}.
2126  * @bus_frequency:	pointer to a variable of enumerated type
2127  *			xge_hal_pci_bus_frequency_e{}.
2128  * @bus_width:		pointer to a variable of enumerated type
2129  *			xge_hal_pci_bus_width_e{}.
2130  *
2131  * Get pci mode, frequency, and PCI bus width.
2132  *
2133  * Returns: one of the xge_hal_status_e{} enumerated types.
2134  * XGE_HAL_OK			- for success.
2135  * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
2136  * XGE_HAL_ERR_BAD_DEVICE_ID	- for invalid card.
2137  *
 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e,
 * xge_hal_pci_bus_width_e.
2139  */
2140 static xge_hal_status_e
2141 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
2142 		xge_hal_pci_bus_frequency_e *bus_frequency,
2143 		xge_hal_pci_bus_width_e *bus_width)
2144 {
2145 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2146 	xge_hal_status_e rc_status = XGE_HAL_OK;
2147 	xge_hal_card_e card_id	   = xge_hal_device_check_id (devh);
2148 
2149 #ifdef XGE_HAL_HERC_EMULATION
2150 	hldev->config.pci_freq_mherz =
2151 		XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2152 	*bus_frequency	=
2153 		XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2154 	*pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2155 #else
2156 	if (card_id == XGE_HAL_CARD_HERC) {
2157 		xge_hal_pci_bar0_t *bar0 =
2158 		(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2159 		u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2160 				    &bar0->pci_info);
2161 		if (XGE_HAL_PCI_32_BIT & pci_info)
2162 			*bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
2163 		else
2164 			*bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2165 		switch((pci_info & XGE_HAL_PCI_INFO)>>60)
2166 		{
2167 			case XGE_HAL_PCI_33MHZ_MODE:
2168 				 *bus_frequency	=
2169 					 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
2170 				 *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
2171 				 break;
2172 			case XGE_HAL_PCI_66MHZ_MODE:
2173 				 *bus_frequency	=
2174 					 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2175 				 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2176 				 break;
2177 			case XGE_HAL_PCIX_M1_66MHZ_MODE:
2178 				 *bus_frequency	=
2179 					 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2180 				 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
2181 				 break;
2182 			case XGE_HAL_PCIX_M1_100MHZ_MODE:
2183 				 *bus_frequency	=
2184 					 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
2185 				 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
2186 				 break;
2187 			case XGE_HAL_PCIX_M1_133MHZ_MODE:
2188 				 *bus_frequency	=
2189 					 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2190 				 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
2191 				 break;
2192 			case XGE_HAL_PCIX_M2_66MHZ_MODE:
2193 				 *bus_frequency	=
2194 					 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2195 				 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
2196 				 break;
2197 			case XGE_HAL_PCIX_M2_100MHZ_MODE:
2198 				 *bus_frequency	=
2199 					 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
2200 				 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
2201 				 break;
2202 			case XGE_HAL_PCIX_M2_133MHZ_MODE:
2203 				 *bus_frequency	=
2204 					 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
2205 				 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
2206 				  break;
2207 			case XGE_HAL_PCIX_M1_RESERVED:
2208 			case XGE_HAL_PCIX_M1_66MHZ_NS:
2209 			case XGE_HAL_PCIX_M1_100MHZ_NS:
2210 			case XGE_HAL_PCIX_M1_133MHZ_NS:
2211 			case XGE_HAL_PCIX_M2_RESERVED:
2212 			case XGE_HAL_PCIX_533_RESERVED:
2213 			default:
2214 				 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
2215 				 xge_debug_device(XGE_ERR,
2216 					  "invalid pci info "XGE_OS_LLXFMT,
2217 					 (unsigned long long)pci_info);
2218 				 break;
2219 		}
2220 		if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
2221 			xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
2222 				"%d frequency %d", *pci_mode, *bus_width,
2223 				*bus_frequency);
2224 		if (hldev->config.pci_freq_mherz ==
2225 				XGE_HAL_DEFAULT_USE_HARDCODE) {
2226 			hldev->config.pci_freq_mherz = *bus_frequency;
2227 		}
2228 	}
	/* For XENA we report PCI mode only; PCI bus frequency and bus width
	 * are set to unknown */
2231 	else if (card_id == XGE_HAL_CARD_XENA) {
2232 		u32 pcix_status;
2233 		u8 dev_num, bus_num;
2234 		/* initialize defaults for XENA */
2235 		*bus_frequency	= XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2236 		*bus_width	= XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2237 		xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2238 			xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
2239 			&pcix_status);
2240 		dev_num = (u8)((pcix_status & 0xF8) >> 3);
2241 		bus_num = (u8)((pcix_status & 0xFF00) >> 8);
2242 		if (dev_num == 0 && bus_num == 0)
2243 			*pci_mode = XGE_HAL_PCI_BASIC_MODE;
2244 		else
2245 			*pci_mode = XGE_HAL_PCIX_BASIC_MODE;
2246 		xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
2247 		if (hldev->config.pci_freq_mherz ==
2248 				XGE_HAL_DEFAULT_USE_HARDCODE) {
2249 			/*
			 * There is no way to detect the bus frequency on Xena,
			 * so in case of automatic configuration we simply
			 * assume 133MHz.
2253 			 */
2254 			hldev->config.pci_freq_mherz =
2255 				XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2256 		}
	} else {
2258 		rc_status =  XGE_HAL_ERR_BAD_DEVICE_ID;
2259 		xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
2260 	}
2261 #endif
2262 
2263 	return rc_status;
2264 }
2265 
2266 /*
2267  * __hal_device_handle_link_up_ind
2268  * @hldev: HAL device handle.
2269  *
2270  * Link up indication handler. The function is invoked by HAL when
 * Xframe indicates that the link is up for a programmable amount of time.
2272  */
2273 static int
2274 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
2275 {
2276 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2277 	u64 val64;
2278 
2279 	/*
2280 	 * If the previous link state is not down, return.
2281 	 */
2282 	if (hldev->link_state == XGE_HAL_LINK_UP) {
2283 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2284 		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2285 			val64 = xge_os_pio_mem_read64(
2286 				hldev->pdev, hldev->regh0,
2287 				&bar0->misc_int_mask);
2288 			val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2289 			val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2290 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2291 				val64, &bar0->misc_int_mask);
2292 		}
2293 #endif
2294 		xge_debug_device(XGE_TRACE,
2295 			"link up indication while link is up, ignoring..");
2296 		return 0;
2297 	}
2298 
	/* Now re-enable the adapter; due to noise the hardware may have turned it off */
2300 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2301 				     &bar0->adapter_control);
2302 	val64 |= XGE_HAL_ADAPTER_CNTL_EN;
2303 	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
2304 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2305 			     &bar0->adapter_control);
2306 
2307 	/* Turn on the Laser */
2308 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2309 				    &bar0->adapter_control);
2310 	val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
2311 			XGE_HAL_ADAPTER_LED_ON);
2312 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2313 			     &bar0->adapter_control);
2314 
2315 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2316 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2317 	        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2318 				              &bar0->adapter_status);
2319 	        if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2320 		             XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
		        xge_debug_device(XGE_TRACE, "%s",
				          "failed to transition the link to up state...");
2323 			return 0;
2324 	        }
2325 	        else {
2326 		        /*
2327 		         * Mask the Link Up interrupt and unmask the Link Down
2328 		         * interrupt.
2329 		         */
2330 		        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2331 					              &bar0->misc_int_mask);
2332 		        val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2333 		        val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2334 		        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2335 				               &bar0->misc_int_mask);
2336 		        xge_debug_device(XGE_TRACE, "calling link up..");
2337 		        hldev->link_state = XGE_HAL_LINK_UP;
2338 
2339 		        /* notify ULD */
2340 		        if (g_xge_hal_driver->uld_callbacks.link_up) {
2341 			        g_xge_hal_driver->uld_callbacks.link_up(
2342 					        hldev->upper_layer_info);
2343 		        }
2344 			return 1;
2345 	        }
2346         }
2347 #endif
2348 	xge_os_mdelay(1);
2349 	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2350 			(XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2351 			XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2352 			XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2353 
2354 		/* notify ULD */
2355 		(void) xge_queue_produce_context(hldev->queueh,
2356 						 XGE_HAL_EVENT_LINK_IS_UP,
2357 						 hldev);
2358 		/* link is up after been enabled */
2359 		return 1;
2360 	} else {
		xge_debug_device(XGE_TRACE, "%s",
				  "failed to transition the link to up state...");
2363 		return 0;
2364 	}
2365 }
2366 
2367 /*
2368  * __hal_device_handle_link_down_ind
2369  * @hldev: HAL device handle.
2370  *
2371  * Link down indication handler. The function is invoked by HAL when
2372  * Xframe indicates that the link is down.
2373  */
2374 static int
2375 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
2376 {
2377 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2378 	u64 val64;
2379 
2380 	/*
2381 	 * If the previous link state is not up, return.
2382 	 */
2383 	if (hldev->link_state == XGE_HAL_LINK_DOWN) {
2384 #ifdef	XGE_HAL_PROCESS_LINK_INT_IN_ISR
2385 		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2386 			val64 = xge_os_pio_mem_read64(
2387 				hldev->pdev, hldev->regh0,
2388 				&bar0->misc_int_mask);
2389 			val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2390 			val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2391 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2392 				val64, &bar0->misc_int_mask);
2393 		}
2394 #endif
2395 		xge_debug_device(XGE_TRACE,
2396 			"link down indication while link is down, ignoring..");
2397 		return 0;
2398 	}
2399 	xge_os_mdelay(1);
2400 
2401 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2402 				      &bar0->adapter_control);
2403 
2404 	/* try to debounce the link only if the adapter is enabled. */
2405 	if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
2406 		if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2407 			(XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2408 			XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2409 			XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2410 			xge_debug_device(XGE_TRACE,
2411 				"link is actually up (possible noisy link?), ignoring.");
2412 			return(0);
2413 		}
2414 	}
2415 
2416 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2417 				    &bar0->adapter_control);
2418 	/* turn off LED */
2419 	val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
2420 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2421 			       &bar0->adapter_control);
2422 
2423 #ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
2424 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2425 		/*
2426 		 * Mask the Link Down interrupt and unmask the Link up
2427 		 * interrupt
2428 		 */
2429 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2430 					      &bar0->misc_int_mask);
2431 		val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2432 		val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2433 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2434 				       &bar0->misc_int_mask);
2435 
2436 		/* link is down */
2437 		xge_debug_device(XGE_TRACE, "calling link down..");
2438 		hldev->link_state = XGE_HAL_LINK_DOWN;
2439 
2440 		/* notify ULD */
2441 		if (g_xge_hal_driver->uld_callbacks.link_down) {
2442 				g_xge_hal_driver->uld_callbacks.link_down(
2443 					hldev->upper_layer_info);
2444 		}
2445 		return 1;
2446 	}
2447 #endif
2448 	/* notify ULD */
2449 	(void) xge_queue_produce_context(hldev->queueh,
2450 					 XGE_HAL_EVENT_LINK_IS_DOWN,
2451 					 hldev);
2452 	/* link is down */
2453 	return 1;
2454 }
2455 /*
2456  * __hal_device_handle_link_state_change
2457  * @hldev: HAL device handle.
2458  *
2459  * Link state change handler. The function is invoked by HAL when
2460  * Xframe indicates link state change condition. The code here makes sure to
2461  * 1) ignore redundant state change indications;
2462  * 2) execute link-up sequence, and handle the failure to bring the link up;
2463  * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
2464  *    upper-layer driver (ULD).
2465  */
2466 static int
2467 __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
2468 {
2469 	u64 hw_status;
2470 	int hw_link_state;
2471 	int retcode;
2472 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2473 	u64 val64;
2474 	int i = 0;
2475 
2476 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2477 					&bar0->adapter_control);
2478 
2479 	/* If the adapter is not enabled but the hal thinks we are in the up
2480 	 * state then transition to the down state.
2481 	 */
2482 	if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
2483 	     (hldev->link_state == XGE_HAL_LINK_UP) ) {
2484 		return(__hal_device_handle_link_down_ind(hldev));
2485 	}
2486 
2487 	do {
2488 		xge_os_mdelay(1);
2489 		(void) xge_hal_device_status(hldev, &hw_status);
2490 		hw_link_state = (hw_status &
2491 			(XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2492 				XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
2493 				XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
2494 
2495 		/* check if the current link state is still considered
2496 		 * to be changed. This way we will make sure that this is
2497 		 * not a noise which needs to be filtered out */
2498 		if (hldev->link_state == hw_link_state)
2499 			break;
2500 	} while (i++ < hldev->config.link_valid_cnt);
2501 
2502 	/* If the current link state is same as previous, just return */
2503 	if (hldev->link_state == hw_link_state)
2504 		retcode = 0;
2505 	/* detected state change */
2506 	else if (hw_link_state == XGE_HAL_LINK_UP)
2507 		retcode = __hal_device_handle_link_up_ind(hldev);
2508 	else
2509 		retcode = __hal_device_handle_link_down_ind(hldev);
2510 	return retcode;
2511 }
2512 
2513 /*
2514  *
2515  */
2516 static void
2517 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
2518 {
2519 	hldev->stats.sw_dev_err_stats.serr_cnt++;
2520 	if (hldev->config.dump_on_serr) {
2521 #ifdef XGE_HAL_USE_MGMT_AUX
2522 		(void) xge_hal_aux_device_dump(hldev);
2523 #endif
2524 	}
2525 
2526 	(void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
2527 			   1, sizeof(u64), (void *)&value);
2528 
2529 	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2530 				  (unsigned long long) value);
2531 }
2532 
2533 /*
2534  *
2535  */
2536 static void
2537 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
2538 {
2539 	if (hldev->config.dump_on_eccerr) {
2540 #ifdef XGE_HAL_USE_MGMT_AUX
2541 		(void) xge_hal_aux_device_dump(hldev);
2542 #endif
2543 	}
2544 
	/* Herc is smart enough to recover on its own! */
2546 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2547 		(void) xge_queue_produce(hldev->queueh,
2548 			XGE_HAL_EVENT_ECCERR, hldev,
2549 			1, sizeof(u64), (void *)&value);
2550 	}
2551 
	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
			  (unsigned long long) value);
2554 }
2555 
2556 /*
2557  *
2558  */
2559 static void
2560 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
2561 {
2562 	if (hldev->config.dump_on_parityerr) {
2563 #ifdef XGE_HAL_USE_MGMT_AUX
2564 		(void) xge_hal_aux_device_dump(hldev);
2565 #endif
2566 	}
2567 	(void) xge_queue_produce_context(hldev->queueh,
2568 			XGE_HAL_EVENT_PARITYERR, hldev);
2569 
	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
			  (unsigned long long) value);
2572 }
2573 
2574 /*
2575  *
2576  */
2577 static void
2578 __hal_device_handle_targetabort(xge_hal_device_t *hldev)
2579 {
2580 	(void) xge_queue_produce_context(hldev->queueh,
2581 			XGE_HAL_EVENT_TARGETABORT, hldev);
2582 }
2583 
2584 
2585 /*
2586  * __hal_device_hw_initialize
2587  * @hldev: HAL device handle.
2588  *
2589  * Initialize Xframe hardware.
2590  */
2591 static xge_hal_status_e
2592 __hal_device_hw_initialize(xge_hal_device_t *hldev)
2593 {
2594 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2595 	xge_hal_status_e status;
2596 	u64 val64;
2597 
2598 	/* Set proper endian settings and verify the same by reading the PIF
2599 	 * Feed-back register. */
2600 	status = __hal_device_set_swapper(hldev);
2601 	if (status != XGE_HAL_OK) {
2602 		return status;
2603 	}
2604 
2605 	/* update the pci mode, frequency, and width */
2606 	if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
2607 		&hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
2608 		hldev->pci_mode	= XGE_HAL_PCI_INVALID_MODE;
2609 		hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2610 		hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2611 		/*
2612 		 * FIXME: this cannot happen.
2613 		 * But if it happens we cannot continue just like that
2614 		 */
2615 		xge_debug_device(XGE_ERR, "unable to get pci info");
2616 	}
2617 
2618 	if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
2619 		(hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
2620 		(hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
2621 		/* PCI optimization: set TxReqTimeOut
2622 		 * register (0x800+0x120) to 0x1ff or
2623 		 * something close to this.
2624 		 * Note: not to be used for PCI-X! */
2625 
2626 		val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
2627 		val64 |= XGE_HAL_TXREQTO_EN;
2628 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2629 				     &bar0->txreqtimeout);
2630 
2631 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2632 				     &bar0->read_retry_delay);
2633 
2634 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2635 				     &bar0->write_retry_delay);
2636 
2637 		xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
2638 	}
2639 
	/* set the number of bytes used to update lso_bytes_sent
	   returned in TxD0 */
2642 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2643 				      &bar0->pic_control_2);
2644 	val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
2645 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2646 			       &bar0->pic_control_2);
	/* clear the EOI_RESET field while leaving XGXS_RESET
	 * asserted, then delay for one second */
2649 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2650 			XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
2651 	xge_os_mdelay(1000);
2652 
2653 	/* Clear the XGXS_RESET field of the SW_RESET register in order to
2654 	 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
2655 	 * to activate the XGXS. The core requires a minimum 500 us reset.*/
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
2657 	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2658 				&bar0->sw_reset);
2659 	xge_os_mdelay(1);
2660 
2661 	/* read registers in all blocks */
2662 	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2663 				   &bar0->mac_int_mask);
2664 	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2665 				   &bar0->mc_int_mask);
2666 	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2667 				   &bar0->xgxs_int_mask);
2668 
	/* set default MTU and steer based on length */
	/* always set 22 bytes extra so that length-based steering works */
	__hal_ring_mtu_set(hldev, hldev->config.mtu + 22);
2671 
	if (hldev->config.mac.rmac_bcast_en) {
		xge_hal_device_bcast_enable(hldev);
	} else {
		xge_hal_device_bcast_disable(hldev);
	}
2677 
2678 #ifndef XGE_HAL_HERC_EMULATION
2679 	__hal_device_xaui_configure(hldev);
2680 #endif
	__hal_device_mac_link_util_set(hldev);
2684 
2685 	/*
2686 	 * Keep its PCI REQ# line asserted during a write
2687 	 * transaction up to the end of the transaction
2688 	 */
2689 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2690 	                &bar0->misc_control);
2691 	val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
2692 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2693 	                val64, &bar0->misc_control);
2694 
2695 	/*
	 * Bimodal interrupts means all Rx traffic interrupts
	 * go to TTI, so we need to adjust the RTI settings and
	 * use an adaptive TTI timer. We need to make sure RTI is
	 * properly configured to a sane value which will not
	 * disrupt bimodal behavior.
2701 	 */
2702 	if (hldev->config.bimodal_interrupts) {
2703 		int i;
2704 
		/* force polling_cnt to be "0", otherwise
		 * IRQ workload statistics will be skewed. This could
		 * be worked out in the TXPIC handler later. */
2708 		hldev->config.isr_polling_cnt = 0;
2709 		hldev->config.sched_timer_us = 10000;
2710 
2711 		/* disable all TTI < 56 */
2712 		for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
2713 			int j;
2714 			if (!hldev->config.fifo.queue[i].configured)
2715 				continue;
2716 			for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
2717 			    if (hldev->config.fifo.queue[i].tti[j].enabled)
2718 				hldev->config.fifo.queue[i].tti[j].enabled = 0;
2719 			}
2720 		}
2721 
2722 		/* now configure bimodal interrupts */
2723 		__hal_device_bimodal_configure(hldev);
2724 	}
2725 
2726 	status = __hal_device_tti_configure(hldev, 0);
2727 	if (status != XGE_HAL_OK)
2728 		return status;
2729 
2730 	status = __hal_device_rti_configure(hldev, 0);
2731 	if (status != XGE_HAL_OK)
2732 		return status;
2733 
2734 	status = __hal_device_rth_it_configure(hldev);
2735 	if (status != XGE_HAL_OK)
2736 		return status;
2737 
2738 	status = __hal_device_rth_spdm_configure(hldev);
2739 	if (status != XGE_HAL_OK)
2740 		return status;
2741 
2742 	status = __hal_device_rts_mac_configure(hldev);
2743 	if (status != XGE_HAL_OK) {
2744 		xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed \n");
2745 		return status;
2746 	}
2747 
2748 	status = __hal_device_rts_qos_configure(hldev);
2749 	if (status != XGE_HAL_OK) {
2750 		xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed \n");
2751 		return status;
2752 	}
2753 
2754 	__hal_device_pause_frames_configure(hldev);
2755 	__hal_device_rmac_padding_configure(hldev);
2756 	__hal_device_shared_splits_configure(hldev);
2757 
2758 	/* make sure all interrupts going to be disabled at the moment */
2759 	__hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
2760 
2761 	/* SXE-008 Transmit DMA arbitration issue */
2762 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
2763 	    hldev->revision < 4) {
2764 		xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
2765 				XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
2766 				&bar0->pcc_enable);
2767 	}
2768 	__hal_fifo_hw_initialize(hldev);
2769 	__hal_ring_hw_initialize(hldev);
2770 
2771 	if (__hal_device_wait_quiescent(hldev, &val64)) {
2772 		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2773 	}
2774 
2775 	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
2776 		XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
2777 		 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
2778 		xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
2779 		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2780 	}
2781 
2782 	xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
2783 			  (unsigned long long)(ulong_t)hldev);
2784 
2785 #if defined(XGE_HAL_MSI)
2786 	/*
2787 	 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
2788 	 * is disabled.
2789 	 */
2790 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2791 	                            &bar0->pic_control);
2792 	val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
2793 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2794 	                            &bar0->pic_control);
2795 #endif
2796 
2797 	hldev->hw_is_initialized = 1;
2798 	hldev->terminating = 0;
2799 	return XGE_HAL_OK;
2800 }
2801 
2802 /*
2803  * __hal_device_reset - Reset device only.
2804  * @hldev: HAL device handle.
2805  *
2806  * Reset the device, and subsequently restore
2807  * the previously saved PCI configuration space.
2808  */
2809 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
2810 static xge_hal_status_e
2811 __hal_device_reset(xge_hal_device_t *hldev)
2812 {
2813 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2814 	int i, j, swap_done, pcisize = 0;
2815 	u64 val64, rawval = 0ULL;
2816 
2817 #if defined(XGE_HAL_MSI_X)
	/* Save the MSI-X vector table */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		if ( hldev->bar2 ) {
			u64 *msix_vector_table = (u64 *)hldev->bar2;

			/* 2 64-bit words for each entry */
			for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) {
				hldev->msix_vector_table[i] = xge_os_pio_mem_read64(hldev->pdev,
					hldev->regh2, &msix_vector_table[i]);
			}
2828 		}
2829 	}
2830 
2831 #endif
2832 
2833 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2834 	                            &bar0->pif_rd_swapper_fb);
2835 	swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
2836 
2837 	if (swap_done) {
2838 		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
2839 		     (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
2840 	} else {
2841 		u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
2842 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
2843 		/* swap it */
2844 		val = (((val & (u32)0x000000ffUL) << 24) |
2845 		       ((val & (u32)0x0000ff00UL) <<  8) |
2846 		       ((val & (u32)0x00ff0000UL) >>  8) |
2847 		       ((val & (u32)0xff000000UL) >> 24));
2848 #endif
2849 		xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
2850 				     &bar0->sw_reset);
2851 	}
2852 
2853 	pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
2854 			   XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
2855 
2856 	xge_os_mdelay(20); /* Wait for 20 ms after reset */
2857 
2858 	{
2859 		/* Poll for no more than 1 second */
2860 		for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
2861 		{
2862 			for (j = 0; j < pcisize; j++) {
2863 				xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
2864 					*((u32*)&hldev->pci_config_space + j));
2865 			}
2866 
2867 			xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2868 				xge_offsetof(xge_hal_pci_config_le_t, device_id),
2869 				&hldev->device_id);
2870 
2871 			if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
2872 				break;
2873 			xge_os_mdelay(20);
2874 		}
2875 	}
2876 
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN) {
		xge_debug_device(XGE_ERR, "device reset failed");
		return XGE_HAL_ERR_RESET_FAILED;
	}
2882 
2883 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2884 		int cnt = 0;
2885 
2886 		rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
2887 		pcisize = XGE_HAL_PCISIZE_HERC;
2888 		xge_os_mdelay(1);
2889 		do {
2890 			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2891 				&bar0->sw_reset);
2892 			if (val64 != rawval) {
2893 				break;
2894 			}
2895 			cnt++;
2896 			xge_os_mdelay(1); /* Wait for 1ms before retry */
2897 		} while(cnt < 20);
2898 	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2899 		rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
2900 		pcisize = XGE_HAL_PCISIZE_XENA;
2901 		xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
2902 	}
2903 
2904 #if defined(XGE_HAL_MSI_X)
2905 	/* Restore MSI-X vector table */
2906 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2907 		if ( hldev->bar2 ) {
2908 		/*
2909 			94: MSIXTable      00000004  ( BIR:4  Offset:0x0 )
2910 			98: PBATable       00000404  ( BIR:4  Offset:0x400 )
2911 		*/
2912 			u64 *msix_vetor_table = (u64 *)hldev->bar2;
2913 
2914 			//xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2915 			//xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
2916 
2917 			// 2 64bit words for each entry
2918 			for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) {
2919 				xge_os_pio_mem_write64(hldev->pdev, hldev->regh2,
2920 								hldev->msix_vector_table[i], &msix_vetor_table[i]);
2921 			}
2922 		}
2923 	}
2924 
2925 #endif
2926 
2927 	hldev->link_state = XGE_HAL_LINK_DOWN;
2928 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2929                                       &bar0->sw_reset);
2930 
2931 	if (val64 != rawval) {
2932 		xge_debug_device(XGE_ERR, "device has not been reset "
2933 			"got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
2934 			(unsigned long long)val64, (unsigned long long)rawval);
2935 	        return XGE_HAL_ERR_RESET_FAILED;
2936 	}
2937 
2938 	hldev->hw_is_initialized = 0;
2939 	return XGE_HAL_OK;
2940 }
2941 
2942 /*
2943  * __hal_device_poll - General private routine to poll the device.
2944  * @hldev: HAL device handle.
2945  *
2946  * Returns: one of the xge_hal_status_e{} enumerated types.
2947  * XGE_HAL_OK			- for success.
2948  * XGE_HAL_ERR_CRITICAL         - when encounters critical error.
2949  */
2950 static xge_hal_status_e
2951 __hal_device_poll(xge_hal_device_t *hldev)
2952 {
2953 	xge_hal_pci_bar0_t *bar0;
2954 	u64 err_reg;
2955 
2956 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2957 
2958 	/* Handling SERR errors by forcing a H/W reset. */
2959 	err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2960 				      &bar0->serr_source);
2961 	if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
2962 		__hal_device_handle_serr(hldev, "serr_source", err_reg);
2963 		return XGE_HAL_ERR_CRITICAL;
2964 	}
2965 
2966 	err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2967 					&bar0->misc_int_reg);
2968 
2969 	if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
2970 		hldev->stats.sw_dev_err_stats.parity_err_cnt++;
2971 		__hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
2972 		return XGE_HAL_ERR_CRITICAL;
2973 	}
2974 
2975 #ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
2976 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2977 #endif
2978 	{
2979 
2980 		/* Handling link status change error Intr */
2981 		err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2982 						&bar0->mac_rmac_err_reg);
2983 		if (__hal_device_handle_link_state_change(hldev))
2984 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2985 				       err_reg, &bar0->mac_rmac_err_reg);
2986 	}
2987 
2988 	if (hldev->inject_serr != 0) {
2989 		err_reg = hldev->inject_serr;
2990 		hldev->inject_serr = 0;
2991 		__hal_device_handle_serr(hldev, "inject_serr", err_reg);
2992 		return XGE_HAL_ERR_CRITICAL;
2993         }
2994 
2995         if (hldev->inject_ecc != 0) {
2996                 err_reg = hldev->inject_ecc;
2997                 hldev->inject_ecc = 0;
2998 		hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
2999                 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
3000 		return XGE_HAL_ERR_CRITICAL;
3001         }
3002 
3003 	if (hldev->inject_bad_tcode != 0) {
3004 		u8 t_code = hldev->inject_bad_tcode;
3005 		xge_hal_channel_t channel;
3006 		xge_hal_fifo_txd_t txd;
3007 		xge_hal_ring_rxd_1_t rxd;
3008 
3009 		channel.devh =  hldev;
3010 
3011 		if (hldev->inject_bad_tcode_for_chan_type ==
3012 						XGE_HAL_CHANNEL_TYPE_FIFO) {
3013 			channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
3014 
3015 		} else {
3016 			channel.type = XGE_HAL_CHANNEL_TYPE_RING;
3017 		}
3018 
		hldev->inject_bad_tcode = 0;
3020 
3021 		if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
3022 			return xge_hal_device_handle_tcode(&channel, &txd,
3023 			                                   t_code);
3024 		else
3025 			return xge_hal_device_handle_tcode(&channel, &rxd,
3026 			                                   t_code);
3027         }
3028 
3029 	return XGE_HAL_OK;
3030 }
3031 
3032 /*
 * __hal_verify_pcc_idle - Verify that all enabled PCCs are IDLE
3034  * @hldev: HAL device handle.
3035  * @adp_status: Adapter Status value
3036  * Usage: See xge_hal_device_enable{}.
3037  */
3038 xge_hal_status_e
3039 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
3040 {
3041 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
3042 	    hldev->revision < 4) {
3043 		/*
		 * For Xena revisions 1, 2 and 3 we enable only 4 PCCs due to
		 * SXE-008 (Transmit DMA arbitration issue).
3046 		 */
3047 		if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
3048 			!= XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
3049 			xge_debug_device(XGE_TRACE, "%s",
3050 			    "PCC is not IDLE after adapter enabled!");
3051 			return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3052 		}
3053 	} else {
3054 		if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
3055 			XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
3056 			xge_debug_device(XGE_TRACE, "%s",
3057 			"PCC is not IDLE after adapter enabled!");
3058 			return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3059 		}
3060 	}
3061 	return XGE_HAL_OK;
3062 }
3063 
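/*
 * __hal_update_bimodal
 * @hldev: HAL device handle.
 * @ring_no: ring (and therefore TTI 56 + ring_no) to re-tune.
 *
 * Adaptive bimodal timer update. Re-computes the TTI timer value for the
 * given ring from the interrupt and byte counts accumulated since the
 * previous tick, keeping the result within the configured
 * bimodal_timer_lo_us..bimodal_timer_hi_us range, and then reprograms
 * the interrupt timers via __hal_device_rti_configure().
 */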
3064 static void
3065 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
3066 {
3067 	int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
3068 	int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
3069 	int iwl_cnt, i;
3070 
3071 #define _HIST_SIZE	50 /* 0.5 sec history */
3072 #define _HIST_ADJ_TIMER	1
3073 #define _STEP		2
3074 
3075 	static int bytes_avg_history[_HIST_SIZE] = {0};
3076 	static int d_avg_history[_HIST_SIZE] = {0};
3077 	static int history_idx = 0;
3078 	static int pstep = 1;
3079 	static int hist_adj_timer = 0;
3080 
3081 	/*
3082 	 * tval - current value of this bimodal timer
3083 	 */
3084 	tval = hldev->bimodal_tti[ring_no].timer_val_us;
3085 
3086 	/*
3087 	 * d - how many interrupts we were getting since last
3088 	 *     bimodal timer tick.
3089 	 */
3090 	d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
3091 		hldev->bimodal_intr_cnt;
3092 
3093 	/* advance bimodal interrupt counter */
3094 	hldev->bimodal_intr_cnt =
3095 		hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
3096 
3097 	/*
3098 	 * iwl_cnt - how many interrupts we've got since last
3099 	 *           bimodal timer tick.
3100 	 */
3101 	iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
3102                      hldev->irq_workload_rxcnt[ring_no] : 1);
3103 	iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
3104                      hldev->irq_workload_txcnt[ring_no] : 1);
3105 	iwl_cnt = iwl_rxcnt + iwl_txcnt;
3106 	iwl_cnt = iwl_cnt; /* just to remove the lint warning */
3107 
3108 	/*
3109 	 * we need to take hldev->config.isr_polling_cnt into account
3110 	 * but for some reason this line causing GCC to produce wrong
3111 	 * code on Solaris. As of now, if bimodal_interrupts is configured
3112 	 * hldev->config.isr_polling_cnt is forced to be "0".
3113 	 *
3114 	 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
3115 
3116 	/*
	 * iwl_avg - how many RXDs on average have been processed since
	 *           the last bimodal timer tick. This indirectly includes
	 *           CPU utilization.
3120 	 */
3121 	iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
3122 	iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
3123 	iwl_avg = iwl_rxavg + iwl_txavg;
3124 	iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
3125 
3126 	/*
	 * len_avg - how many bytes on average have been processed since
	 *           the last bimodal timer tick, i.e. the average frame size.
3129 	 */
3130 	len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
3131 		       (hldev->irq_workload_rxd[ring_no] ?
3132 		        hldev->irq_workload_rxd[ring_no] : 1);
3133 	len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
3134 		       (hldev->irq_workload_txd[ring_no] ?
3135 		        hldev->irq_workload_txd[ring_no] : 1);
3136 	len_avg = len_rxavg + len_txavg;
3137 	if (len_avg < 60)
3138 		len_avg = 60;
3139 
3140 	/* align on low boundary */
3141 	if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
3142 		tval = hldev->config.bimodal_timer_lo_us;
3143 
3144 	/* reset faster */
3145 	if (iwl_avg == 1) {
3146 		tval = hldev->config.bimodal_timer_lo_us;
3147 		/* reset history */
3148 		for (i = 0; i < _HIST_SIZE; i++)
3149 			bytes_avg_history[i] = d_avg_history[i] = 0;
3150 		history_idx = 0;
3151 		pstep = 1;
3152 		hist_adj_timer = 0;
3153 	}
3154 
	/* always try to adjust the timer to the best throughput value */
3156 	bytes_avg = iwl_avg * len_avg;
3157 	history_idx %= _HIST_SIZE;
3158 	bytes_avg_history[history_idx] = bytes_avg;
3159 	d_avg_history[history_idx] = d;
3160 	history_idx++;
3161 	d_hist = bytes_hist = 0;
3162 	for (i = 0; i < _HIST_SIZE; i++) {
3163 		/* do not re-configure until history is gathered */
3164 		if (!bytes_avg_history[i]) {
3165 			tval = hldev->config.bimodal_timer_lo_us;
3166 			goto _end;
3167 		}
3168 		bytes_hist += bytes_avg_history[i];
3169 		d_hist += d_avg_history[i];
3170 	}
3171 	bytes_hist /= _HIST_SIZE;
3172 	d_hist /= _HIST_SIZE;
3173 
3174 //	xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
3175 //		      d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
3176 //		      d_hist*bytes_hist, pstep);
3177 
3178 	/* make an adaptive step */
3179 	if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
3180 		pstep = !pstep;
3181 		hist_adj_timer = 0;
3182 	}
3183 
3184 	if (pstep &&
3185 	    (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
3186 		tval += _STEP;
3187 		hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
3188 	} else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
3189 		tval -= _STEP;
3190 		hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
3191 	}
3192 
3193 	/* enable TTI range A for better latencies */
3194 	hldev->bimodal_urange_a_en = 0;
3195 	if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
3196 		hldev->bimodal_urange_a_en = 1;
3197 
3198 _end:
3199 	/* reset workload statistics counters */
3200 	hldev->irq_workload_rxcnt[ring_no] = 0;
3201 	hldev->irq_workload_rxd[ring_no] = 0;
3202 	hldev->irq_workload_rxlen[ring_no] = 0;
3203 	hldev->irq_workload_txcnt[ring_no] = 0;
3204 	hldev->irq_workload_txd[ring_no] = 0;
3205 	hldev->irq_workload_txlen[ring_no] = 0;
3206 
3207 	/* reconfigure TTI56 + ring_no with new timer value */
3208 	hldev->bimodal_timer_val_us = tval;
3209 	(void) __hal_device_rti_configure(hldev, 1);
3210 }
3211 
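/*
 * __hal_update_rxufca
 * @hldev: HAL device handle.
 * @ring_no: ring whose RTI urange A frame count (ufc_a) is tuned.
 *
 * Adaptive urange A coalescing. Once per rxufca_lbolt_period ticks the
 * Rx interrupt rate is compared against the current threshold: if it is
 * higher, ufc_a is incremented (up to rxufca_hi_lim), otherwise it is
 * decremented (down to rxufca_lo_lim), and the RTI registers are
 * reprogrammed.
 */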
3212 static void
3213 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
3214 {
3215 	int ufc, ic, i;
3216 
3217 	ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
3218 	ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
3219 
3220 	/* urange_a adaptive coalescing */
3221 	if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
3222 		if (ic > hldev->rxufca_intr_thres) {
3223 			if (ufc < hldev->config.rxufca_hi_lim) {
3224 				ufc += 1;
3225 				for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3226 				   hldev->config.ring.queue[i].rti.ufc_a = ufc;
3227 				(void) __hal_device_rti_configure(hldev, 1);
3228 				hldev->stats.sw_dev_info_stats.
3229 					rxufca_hi_adjust_cnt++;
3230 			}
3231 			hldev->rxufca_intr_thres = ic +
3232 				hldev->config.rxufca_intr_thres; /* def: 30 */
3233 		} else {
3234 			if (ufc > hldev->config.rxufca_lo_lim) {
3235 				ufc -= 1;
3236 				for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3237 				   hldev->config.ring.queue[i].rti.ufc_a = ufc;
3238 				(void) __hal_device_rti_configure(hldev, 1);
3239 				hldev->stats.sw_dev_info_stats.
3240 					rxufca_lo_adjust_cnt++;
3241 			}
3242 		}
3243 		hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
3244 			hldev->config.rxufca_lbolt_period;
3245 	}
3246 	hldev->rxufca_lbolt++;
3247 }
3248 
3249 /*
3250  * __hal_device_handle_mc - Handle MC interrupt reason
3251  * @hldev: HAL device handle.
3252  * @reason: interrupt reason
3253  */
3254 xge_hal_status_e
3255 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
3256 {
3257 	xge_hal_pci_bar0_t *isrbar0 =
3258 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3259 	u64 val64;
3260 
3261 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3262 				&isrbar0->mc_int_status);
3263 	if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
3264 		return XGE_HAL_OK;
3265 
3266 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3267 				&isrbar0->mc_err_reg);
3268 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3269 				val64, &isrbar0->mc_err_reg);
3270 
3271 	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
3272 	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
3273 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
3274 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
3275 	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3276 	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
3277 	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
3278 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
3279 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
3280 		hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
3281 		hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3282 	}
3283 
3284 	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
3285 	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
3286 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3287 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
3288 	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3289 	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
3290 	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
3291 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
3292 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
3293 		hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
3294 		hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3295 	}
3296 
3297 	if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
3298 		hldev->stats.sw_dev_err_stats.sm_err_cnt++;
3299 	}
3300 
	/* these two errors should result in a device reset */
3302 	if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3303 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
3304                 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
3305 		return XGE_HAL_ERR_CRITICAL;
3306 	}
3307 
3308 	return XGE_HAL_OK;
3309 }
3310 
3311 /*
3312  * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
3313  * @hldev: HAL device handle.
3314  * @reason: interrupt reason
3315  */
3316 xge_hal_status_e
3317 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
3318 {
3319 	xge_hal_pci_bar0_t *isrbar0 =
3320 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3321 	u64 val64;
3322 
3323 	if (reason & XGE_HAL_PIC_INT_FLSH) {
3324 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3325 					&isrbar0->flsh_int_reg);
3326 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3327 				       val64, &isrbar0->flsh_int_reg);
3328 		/* FIXME: handle register */
3329 	}
3330 	if (reason & XGE_HAL_PIC_INT_MDIO) {
3331 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3332 					&isrbar0->mdio_int_reg);
3333 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3334 				       val64, &isrbar0->mdio_int_reg);
3335 		/* FIXME: handle register */
3336 	}
3337 	if (reason & XGE_HAL_PIC_INT_IIC) {
3338 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3339 					&isrbar0->iic_int_reg);
3340 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3341 				       val64, &isrbar0->iic_int_reg);
3342 		/* FIXME: handle register */
3343 	}
3344 	if (reason & XGE_HAL_PIC_INT_MISC) {
3345 		val64 = xge_os_pio_mem_read64(hldev->pdev,
3346 				hldev->regh0, &isrbar0->misc_int_reg);
3347 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3348 		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3349 			/*  Check for Link interrupts. If both Link Up/Down
3350 			 *  bits are set, clear both and check adapter status
3351 			 */
3352 			if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
3353 			    (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
3354 				u64 temp64;
3355 
3356 				xge_debug_device(XGE_TRACE,
3357 				"both link up and link down detected "XGE_OS_LLXFMT,
3358 				(unsigned long long)val64);
3359 
3360 				temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
3361 					  XGE_HAL_MISC_INT_REG_LINK_UP_INT);
3362 				xge_os_pio_mem_write64(hldev->pdev,
3363 						       hldev->regh0, temp64,
3364 						       &isrbar0->misc_int_reg);
3365 			}
3366 			else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
3367 				xge_debug_device(XGE_TRACE,
3368 					"link up call request, misc_int "XGE_OS_LLXFMT,
3369 					(unsigned long long)val64);
3370 				__hal_device_handle_link_up_ind(hldev);
3371 			}
3372 			else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
3373 				xge_debug_device(XGE_TRACE,
3374 					"link down request, misc_int "XGE_OS_LLXFMT,
3375 					(unsigned long long)val64);
3376 				__hal_device_handle_link_down_ind(hldev);
3377 			}
3378 		} else
3379 #endif
3380 		{
3381 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3382 				       val64, &isrbar0->misc_int_reg);
3383 		}
3384 	}
3385 
3386 	return XGE_HAL_OK;
3387 }
3388 
3389 /*
3390  * __hal_device_handle_txpic - Handle TxPIC interrupt reason
3391  * @hldev: HAL device handle.
3392  * @reason: interrupt reason
3393  */
3394 xge_hal_status_e
3395 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3396 {
3397 	xge_hal_status_e status = XGE_HAL_OK;
3398 	xge_hal_pci_bar0_t *isrbar0 =
3399 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3400 	volatile u64 val64;
3401 
3402 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3403 				&isrbar0->pic_int_status);
3404 	if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3405 		      XGE_HAL_PIC_INT_MDIO |
3406 		      XGE_HAL_PIC_INT_IIC |
3407 		      XGE_HAL_PIC_INT_MISC) ) {
3408 		status =  __hal_device_handle_pic(hldev, val64);
3409 		xge_os_wmb();
3410 	}
3411 
3412 	if (!(val64 & XGE_HAL_PIC_INT_TX))
3413 		return status;
3414 
3415 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3416 				&isrbar0->txpic_int_reg);
3417 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3418 			       val64, &isrbar0->txpic_int_reg);
3419 	xge_os_wmb();
3420 
3421 	if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
3422 		int i;
3423 
3424 		if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3425 			g_xge_hal_driver->uld_callbacks.sched_timer(
3426 					  hldev, hldev->upper_layer_info);
3427 		/*
		 * This feature implements adaptive receive interrupt
		 * coalescing. It is disabled by default. To enable it,
		 * set hldev->config.rxufca_lo_lim to a value different
		 * from hldev->config.rxufca_hi_lim.
		 *
		 * A HW timer is used for this feature, so the user needs
		 * to configure hldev->config.rxufca_lbolt_period, which
		 * is essentially a time slice of that timer.
		 *
		 * For those familiar with Linux, lbolt is analogous to
		 * jiffies, i.e. a timer tick.
3439 		 */
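		/*
		 * A minimal configuration sketch (illustrative values only;
		 * the fields are the same rxufca knobs referenced above and
		 * would normally be set by the ULD in the device config
		 * passed to xge_hal_device_initialize()):
		 *
		 *     device_config->rxufca_lo_lim       = 1;
		 *     device_config->rxufca_hi_lim       = 16;
		 *     device_config->rxufca_intr_thres   = 30;
		 *     device_config->rxufca_lbolt_period = 8;
		 */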
3440 		if (hldev->config.rxufca_lo_lim !=
3441 				hldev->config.rxufca_hi_lim &&
3442 		    hldev->config.rxufca_lo_lim != 0) {
3443 			for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3444 				if (!hldev->config.ring.queue[i].configured)
3445 					continue;
3446 				if (hldev->config.ring.queue[i].rti.urange_a)
3447 					__hal_update_rxufca(hldev, i);
3448 			}
3449 		}
3450 
3451 		/*
		 * This feature implements adaptive TTI timer re-calculation
		 * based on host utilization, the number of interrupts
		 * processed, the number of RxDs per tick, and the average
		 * packet length per tick.
3456 		 */
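		/*
		 * A one-line configuration sketch (illustrative; on Herc
		 * this knob defaults to 1 when left at
		 * XGE_HAL_DEFAULT_USE_HARDCODE, see
		 * xge_hal_device_initialize() below):
		 *
		 *     device_config->bimodal_interrupts = 1;
		 */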
3457 		if (hldev->config.bimodal_interrupts) {
3458 			for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3459 				if (!hldev->config.ring.queue[i].configured)
3460 					continue;
3461 				if (hldev->bimodal_tti[i].enabled)
3462 					__hal_update_bimodal(hldev, i);
3463 			}
3464 		}
3465 	}
3466 
3467 	return XGE_HAL_OK;
3468 }
3469 
3470 /*
3471  * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3472  * @hldev: HAL device handle.
3473  * @reason: interrupt reason
3474  */
3475 xge_hal_status_e
3476 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
3477 {
3478 	xge_hal_pci_bar0_t *isrbar0 =
3479 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3480 	u64 val64, err;
3481 
3482 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3483 				&isrbar0->txdma_int_status);
3484 	if (val64 & XGE_HAL_TXDMA_PFC_INT) {
3485 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3486 				&isrbar0->pfc_err_reg);
3487 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3488 				err, &isrbar0->pfc_err_reg);
3489 		/* FIXME: handle register */
3490 	}
3491 	if (val64 & XGE_HAL_TXDMA_TDA_INT) {
3492 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3493 				&isrbar0->tda_err_reg);
3494 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3495 				err, &isrbar0->tda_err_reg);
3496 		/* FIXME: handle register */
3497 	}
3498 	if (val64 & XGE_HAL_TXDMA_PCC_INT) {
3499 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3500 				&isrbar0->pcc_err_reg);
3501 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3502 				err, &isrbar0->pcc_err_reg);
3503 		/* FIXME: handle register */
3504 	}
3505 	if (val64 & XGE_HAL_TXDMA_TTI_INT) {
3506 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3507 				&isrbar0->tti_err_reg);
3508 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3509 				err, &isrbar0->tti_err_reg);
3510 		/* FIXME: handle register */
3511 	}
3512 	if (val64 & XGE_HAL_TXDMA_LSO_INT) {
3513 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3514 				&isrbar0->lso_err_reg);
3515 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3516 				err, &isrbar0->lso_err_reg);
3517 		/* FIXME: handle register */
3518 	}
3519 	if (val64 & XGE_HAL_TXDMA_TPA_INT) {
3520 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3521 				&isrbar0->tpa_err_reg);
3522 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3523 				err, &isrbar0->tpa_err_reg);
3524 		/* FIXME: handle register */
3525 	}
3526 	if (val64 & XGE_HAL_TXDMA_SM_INT) {
3527 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3528 				&isrbar0->sm_err_reg);
3529 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3530 				err, &isrbar0->sm_err_reg);
3531 		/* FIXME: handle register */
3532 	}
3533 
3534 	return XGE_HAL_OK;
3535 }
3536 
3537 /*
3538  * __hal_device_handle_txmac - Handle TxMAC interrupt reason
3539  * @hldev: HAL device handle.
3540  * @reason: interrupt reason
3541  */
3542 xge_hal_status_e
3543 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
3544 {
3545 	xge_hal_pci_bar0_t *isrbar0 =
3546 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3547 	u64 val64;
3548 
3549 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3550 				&isrbar0->mac_int_status);
3551 	if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
3552 		return XGE_HAL_OK;
3553 
3554 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3555 				&isrbar0->mac_tmac_err_reg);
3556 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3557 				val64, &isrbar0->mac_tmac_err_reg);
3558 	/* FIXME: handle register */
3559 
3560 	return XGE_HAL_OK;
3561 }
3562 
3563 /*
3564  * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
3565  * @hldev: HAL device handle.
3566  * @reason: interrupt reason
3567  */
3568 xge_hal_status_e
3569 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
3570 {
3571 	/* FIXME: handle register */
3572 
3573 	return XGE_HAL_OK;
3574 }
3575 
3576 /*
3577  * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
3578  * @hldev: HAL device handle.
3579  * @reason: interrupt reason
3580  */
3581 xge_hal_status_e
3582 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
3583 {
3584 	/* FIXME: handle register */
3585 
3586 	return XGE_HAL_OK;
3587 }
3588 
3589 /*
3590  * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
3591  * @hldev: HAL device handle.
3592  * @reason: interrupt reason
3593  */
3594 xge_hal_status_e
3595 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
3596 {
3597 	xge_hal_pci_bar0_t *isrbar0 =
3598 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3599 	u64 val64, err;
3600 
3601 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3602 				&isrbar0->rxdma_int_status);
3603 	if (val64 & XGE_HAL_RXDMA_RC_INT) {
3604 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3605 				&isrbar0->rc_err_reg);
3606 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3607 				err, &isrbar0->rc_err_reg);
3608 		/* FIXME: handle register */
3609 	}
3610 	if (val64 & XGE_HAL_RXDMA_RPA_INT) {
3611 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3612 				&isrbar0->rpa_err_reg);
3613 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3614 				err, &isrbar0->rpa_err_reg);
3615 		/* FIXME: handle register */
3616 	}
3617 	if (val64 & XGE_HAL_RXDMA_RDA_INT) {
3618 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3619 				&isrbar0->rda_err_reg);
3620 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3621 				err, &isrbar0->rda_err_reg);
3622 		/* FIXME: handle register */
3623 	}
3624 	if (val64 & XGE_HAL_RXDMA_RTI_INT) {
3625 		err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3626 				&isrbar0->rti_err_reg);
3627 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3628 				err, &isrbar0->rti_err_reg);
3629 		/* FIXME: handle register */
3630 	}
3631 
3632 	return XGE_HAL_OK;
3633 }
3634 
3635 /*
3636  * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
3637  * @hldev: HAL device handle.
3638  * @reason: interrupt reason
3639  */
3640 xge_hal_status_e
3641 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
3642 {
3643 	xge_hal_pci_bar0_t *isrbar0 =
3644 	        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3645 	u64 val64;
3646 
3647 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3648 				&isrbar0->mac_int_status);
3649 	if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
3650 		return XGE_HAL_OK;
3651 
3652 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3653 				&isrbar0->mac_rmac_err_reg);
3654 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3655 				val64, &isrbar0->mac_rmac_err_reg);
3656 
3657 	/* FIXME: handle register */
3658 
3659 	return XGE_HAL_OK;
3660 }
3661 
3662 /*
3663  * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
3664  * @hldev: HAL device handle.
3665  * @reason: interrupt reason
3666  */
3667 xge_hal_status_e
3668 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
3669 {
3670 	/* FIXME: handle register */
3671 
3672 	return XGE_HAL_OK;
3673 }
3674 
3675 /**
3676  * xge_hal_device_enable - Enable device.
3677  * @hldev: HAL device handle.
3678  *
3679  * Enable the specified device: bring up the link/interface.
3680  * Returns:  XGE_HAL_OK - success.
3681  * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
3682  * to a "quiescent" state.
3683  *
3684  * See also: xge_hal_status_e{}.
3685  *
3686  * Usage: See ex_open{}.
3687  */
3688 xge_hal_status_e
3689 xge_hal_device_enable(xge_hal_device_t *hldev)
3690 {
3691 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3692 	u64 val64;
3693 	u64 adp_status;
3694 	int i, j;
3695 
3696 	if (!hldev->hw_is_initialized) {
3697 		xge_hal_status_e status;
3698 
3699 		status = __hal_device_hw_initialize(hldev);
3700 		if (status != XGE_HAL_OK) {
3701 			return status;
3702 		}
3703 	}
3704 
3705 	/*
	 * Not needed in most cases, i.e. when device_disable() is
	 * followed by a reset - the latter copies back the PCI config
	 * space, along with the bus mastership - see
	 * __hal_device_reset(). However, there are (or may in the future
	 * be) other cases, and enabling it again does not hurt.
3712 	 */
3713 	__hal_device_bus_master_enable(hldev);
3714 
3715 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3716 		/*
3717 		 * Configure the link stability period.
3718 		 */
3719 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3720 					      &bar0->misc_control);
3721 		if (hldev->config.link_stability_period !=
3722 				XGE_HAL_DEFAULT_USE_HARDCODE) {
3723 
3724 			val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
3725 					hldev->config.link_stability_period);
3726 		} else {
3727 			/*
			 * Use the default link stability period of 1 ms
3729 			 */
3730 			val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
3731 					XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
3732 		}
3733 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3734 				       val64, &bar0->misc_control);
3735 
3736 		/*
		 * Clear any possible link up/down interrupts that could
		 * have popped up just before enabling the card.
3739 		 */
3740 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3741 					      &bar0->misc_int_reg);
3742 		if (val64) {
3743 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3744 					       val64, &bar0->misc_int_reg);
3745 			xge_debug_device(XGE_TRACE, "%s","link state cleared");
3746 		}
3747 	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
3748 		/*
		 * Clear any possible link state change interrupts that
		 * could have popped up just before enabling the card.
3751 		 */
3752 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3753 			&bar0->mac_rmac_err_reg);
3754 		if (val64) {
3755 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3756 					       val64, &bar0->mac_rmac_err_reg);
3757 			xge_debug_device(XGE_TRACE, "%s", "link state cleared");
3758 		}
3759 	}
3760 
3761 	if (__hal_device_wait_quiescent(hldev, &val64)) {
3762 		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3763 	}
3764 
3765 	/* Enabling Laser. */
3766 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3767 				    &bar0->adapter_control);
3768 	val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
3769 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3770 	                     &bar0->adapter_control);
3771 
3772 	/* let link establish */
3773 	xge_os_mdelay(1);
3774 
	/* set link down until the poll() routine sets it up (maybe) */
3776 	hldev->link_state = XGE_HAL_LINK_DOWN;
3777 
	/* If the link is UP (adapter is connected) then enable the adapter */
3779 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3780 	                            &bar0->adapter_status);
3781 	if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
3782 		     XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
3783 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3784 		                        &bar0->adapter_control);
3785 		val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
3786 	} else {
3787 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3788 		                           &bar0->adapter_control);
3789 		val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
3790 				  XGE_HAL_ADAPTER_LED_ON );
3791 	}
3792 
3793 	val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN;   /* adapter enable */
	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clear ECC_EN */
3795 	xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
3796 			      &bar0->adapter_control);
3797 
3798 	/* We spin here waiting for the Link to come up.
3799 	 * This is the fix for the Link being unstable after the reset. */
3800 	i = 0;
3801 	j = 0;
3802 	do
3803 	{
3804 		adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3805 		                                &bar0->adapter_status);
3806 
3807 		/* Read the adapter control register for Adapter_enable bit */
3808 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3809 		                           &bar0->adapter_control);
3810 		if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
3811 				    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
3812 		    (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
3813 			j++;
3814 			if (j >= hldev->config.link_valid_cnt) {
3815 				if (xge_hal_device_status(hldev, &adp_status) ==
3816 							XGE_HAL_OK) {
3817 					if (__hal_verify_pcc_idle(hldev,
3818 						  adp_status) != XGE_HAL_OK) {
3819 					   return
3820 					    XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3821 					}
3822 					xge_debug_device(XGE_TRACE,
3823 					      "adp_status: "XGE_OS_LLXFMT
3824 					      ", link is up on "
3825 					      "adapter enable!",
3826 					      (unsigned long long)adp_status);
3827 					val64 = xge_os_pio_mem_read64(
3828 							hldev->pdev,
3829 							hldev->regh0,
3830 							&bar0->adapter_control);
3831 					val64 = val64|
3832 						(XGE_HAL_ADAPTER_EOI_TX_ON |
3833 						 XGE_HAL_ADAPTER_LED_ON );
3834 					xge_os_pio_mem_write64(hldev->pdev,
3835 					                hldev->regh0, val64,
3836 					                &bar0->adapter_control);
3837 					xge_os_mdelay(1);
3838 
3839 					val64 = xge_os_pio_mem_read64(
3840 							hldev->pdev,
3841 							hldev->regh0,
3842 							&bar0->adapter_control);
					break;    /* out of the do-while loop */
3844 				} else {
3845 				       return
3846 					   XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3847 				}
3848 			}
3849 		} else {
3850 			j = 0;  /* Reset the count */
3851 			/* Turn on the Laser */
3852 			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3853 			                &bar0->adapter_control);
3854 			val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
3855 			xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
3856 						val64, &bar0->adapter_control);
3857 
3858 			xge_os_mdelay(1);
3859 
			/* Now re-enable it, as the hardware turned it
			 * off due to noise */
3862 			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3863 			                            &bar0->adapter_control);
3864 			val64 |= XGE_HAL_ADAPTER_CNTL_EN;
			val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clear ECC_EN */
3866 			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3867 			                    &bar0->adapter_control);
3868 		}
3869 		xge_os_mdelay(1); /* Sleep for 1 msec */
3870 		i++;
3871 	} while (i < hldev->config.link_retry_cnt);
3872 
3873 	__hal_device_led_actifity_fix(hldev);
3874 
3875 #ifndef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
	/* Here we perform a soft reset on the XGXS to force the link down.
	 * Since the link is already up, we will get a link state change
	 * poll notification after the adapter is enabled */
3879 
3880 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3881 			0x80010515001E0000ULL, &bar0->dtx_control);
3882 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3883 			&bar0->dtx_control);
3884 	xge_os_mdelay(1); /* Sleep for 1 msec */
3885 
3886 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3887 			0x80010515001E00E0ULL, &bar0->dtx_control);
3888 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3889 			&bar0->dtx_control);
3890 	xge_os_mdelay(1); /* Sleep for 1 msec */
3891 
3892 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3893 			0x80070515001F00E4ULL, &bar0->dtx_control);
3894 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3895 			&bar0->dtx_control);
3896 
	xge_os_mdelay(100); /* Sleep for 100 msec */
3898 #else
3899 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
3900 #endif
3901 	{
3902 		/*
3903 		 * With some switches the link state change interrupt does not
3904 		 * occur even though the xgxs reset is done as per SPN-006. So,
3905 		 * poll the adapter status register and check if the link state
3906 		 * is ok.
3907 		 */
3908 		adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3909 						   &bar0->adapter_status);
3910 		if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
3911 		      XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
3912 		{
3913 			xge_debug_device(XGE_TRACE, "%s",
3914 			     "enable device causing link state change ind..");
3915 			(void) __hal_device_handle_link_state_change(hldev);
3916 		}
3917 	}
3918 
3919 	if (hldev->config.stats_refresh_time_sec !=
3920 	    XGE_HAL_STATS_REFRESH_DISABLE)
3921 	        __hal_stats_enable(&hldev->stats);
3922 
3923 	return XGE_HAL_OK;
3924 }
3925 
3926 /**
3927  * xge_hal_device_disable - Disable Xframe adapter.
3928  * @hldev: Device handle.
3929  *
3930  * Disable this device. To gracefully reset the adapter, the host should:
3931  *
3932  *	- call xge_hal_device_disable();
3933  *
3934  *	- call xge_hal_device_intr_disable();
3935  *
3936  *	- close all opened channels and clean up outstanding resources;
3937  *
3938  *	- do some work (error recovery, change mtu, reset, etc);
3939  *
3940  *	- call xge_hal_device_enable();
3941  *
3942  *	- open channels, replenish RxDs, etc.
3943  *
3944  *	- call xge_hal_device_intr_enable().
3945  *
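 * A graceful-reset sketch following the steps above (error handling and
 * channel (re)open details are omitted; my_hldev is a hypothetical,
 * already-initialized device handle):
 *
 *	(void) xge_hal_device_disable(my_hldev);
 *	xge_hal_device_intr_disable(my_hldev);
 *	... close channels, do the work (e.g. change MTU) ...
 *	(void) xge_hal_device_enable(my_hldev);
 *	... re-open channels, replenish RxDs ...
 *	xge_hal_device_intr_enable(my_hldev);
 *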
3946  * Note: Disabling the device does _not_ include disabling of interrupts.
 * After being disabled the device stops receiving new frames, but frames
 * that were already in the pipe will keep arriving for a few milliseconds.
3949  *
3950  * Returns:  XGE_HAL_OK - success.
3951  * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
3952  * a "quiescent" state.
3953  *
3954  * See also: xge_hal_status_e{}.
3955  */
3956 xge_hal_status_e
3957 xge_hal_device_disable(xge_hal_device_t *hldev)
3958 {
3959 	xge_hal_status_e status = XGE_HAL_OK;
3960 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3961 	u64 val64;
3962 
3963 	xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
3964 
3965 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3966 	                            &bar0->adapter_control);
3967 	val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
3968 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3969 	                     &bar0->adapter_control);
3970 
3971 	if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
3972 		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3973 	}
3974 
3975 	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
3976 		 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
3977 		 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
3978 		xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
3979 		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3980 	}
3981 
3982 	if (hldev->config.stats_refresh_time_sec !=
3983 	    XGE_HAL_STATS_REFRESH_DISABLE)
3984                 __hal_stats_disable(&hldev->stats);
3985 #ifdef XGE_DEBUG_ASSERT
3986         else
3987 	        xge_assert(!hldev->stats.is_enabled);
3988 #endif
3989 
3990 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
3991 	__hal_device_bus_master_disable(hldev);
3992 #endif
3993 
3994 	return status;
3995 }
3996 
3997 /**
3998  * xge_hal_device_reset - Reset device.
3999  * @hldev: HAL device handle.
4000  *
 * Soft-reset the device and reset the device stats, except reset_cnt.
 *
 * After the reset is done, the HAL will try to re-initialize the hardware.
4004  *
4005  * Returns:  XGE_HAL_OK - success.
4006  * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
4007  * XGE_HAL_ERR_RESET_FAILED - Reset failed.
4008  *
4009  * See also: xge_hal_status_e{}.
4010  */
4011 xge_hal_status_e
4012 xge_hal_device_reset(xge_hal_device_t *hldev)
4013 {
4014 	xge_hal_status_e status;
4015 
	/* remember the current soft reset count; stats are cleared below */
4017 	u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
4018 
4019 	xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
4020 
4021 	if (!hldev->is_initialized)
4022 		return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
4023 
4024 	/* actual "soft" reset of the adapter */
4025 	status = __hal_device_reset(hldev);
4026 
4027 	/* reset all stats including saved */
4028 	__hal_stats_soft_reset(hldev, 1);
4029 
4030 	/* increment reset counter */
4031 	hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
4032 
4033 	/* re-initialize rxufca_intr_thres */
4034 	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4035 
4036         hldev->reset_needed_after_close = 0;
4037 
4038 	return status;
4039 }
4040 
4041 /**
4042  * xge_hal_device_status - Check whether Xframe hardware is ready for
4043  * operation.
4044  * @hldev: HAL device handle.
4045  * @hw_status: Xframe status register. Returned by HAL.
4046  *
4047  * Check whether Xframe hardware is ready for operation.
 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest of
 * the hardware functional blocks.
4050  *
 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
 * returns XGE_HAL_FAIL. On success, also fills in the adapter status
 * (in @hw_status).
4053  *
4054  * See also: xge_hal_status_e{}.
4055  * Usage: See ex_open{}.
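 *
 * A minimal readiness-check sketch (my_hldev is a hypothetical,
 * initialized device handle; hw_status receives the raw adapter_status
 * value on success):
 *
 *	u64 hw_status;
 *
 *	if (xge_hal_device_status(my_hldev, &hw_status) == XGE_HAL_OK) {
 *		... device is ready; hw_status holds adapter_status ...
 *	}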
4056  */
4057 xge_hal_status_e
4058 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
4059 {
4060 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4061 	u64 tmp64;
4062 
4063 	tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4064 	                            &bar0->adapter_status);
4065 
4066 
4067 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
4068 		xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
4069 		return XGE_HAL_FAIL;
4070 	}
4071 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
4072 		xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
4073 		return XGE_HAL_FAIL;
4074 	}
4075 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
4076 		xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
4077 		return XGE_HAL_FAIL;
4078 	}
4079 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
4080 		xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
4081 		return XGE_HAL_FAIL;
4082 	}
4083 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
4084 		xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
4085 		return XGE_HAL_FAIL;
4086 	}
4087 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
4088 		xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
4089 		return XGE_HAL_FAIL;
4090 	}
4091 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
4092 		xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
4093 		return XGE_HAL_FAIL;
4094 	}
4095 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
4096 		xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
4097 		return XGE_HAL_FAIL;
4098 	}
4099 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK)) {
4100 		xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
4101 		return XGE_HAL_FAIL;
4102 	}
4103 
4104 	*hw_status = tmp64;
4105 
4106 	return XGE_HAL_OK;
4107 }
4108 
4109 
4110 /**
4111  * xge_hal_device_intr_enable - Enable Xframe interrupts.
4112  * @hldev: HAL device handle.
 *
 * Enable Xframe interrupts. This function is to be executed last in the
 * Xframe initialization sequence.
4118  *
4119  * See also: xge_hal_device_intr_disable()
4120  */
4121 void
4122 xge_hal_device_intr_enable(xge_hal_device_t *hldev)
4123 {
4124 	xge_list_t *item;
4125 	u64 val64;
4126 
4127 	/* PRC initialization and configuration */
4128 	xge_list_for_each(item, &hldev->ring_channels) {
4129 		xge_hal_channel_h channel;
4130 		channel = xge_container_of(item, xge_hal_channel_t, item);
4131 		__hal_ring_prc_enable(channel);
4132 	}
4133 
4134 	/* enable traffic only interrupts */
4135 	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
4136 		/*
		 * make sure all interrupts are disabled if MSI
		 * is enabled.
4139 		 */
4140 		__hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
4141 	} else {
4142 
4143 		/*
4144 		 * Enable the Tx traffic interrupts only if the TTI feature is
4145 		 * enabled.
4146 		 */
4147 		val64 = 0;
4148 		if (hldev->tti_enabled)
4149 			val64 = XGE_HAL_TX_TRAFFIC_INTR;
4150 
4151 		if (!hldev->config.bimodal_interrupts)
4152 			val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4153 
4154 		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4155 			val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4156 
4157 		val64 |=XGE_HAL_TX_PIC_INTR |
4158 			XGE_HAL_MC_INTR |
4159 			(hldev->config.sched_timer_us !=
4160 			 XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
4161 		__hal_device_intr_mgmt(hldev, val64, 1);
4162 	}
4163 	xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
4164 }
4165 
4166 
4167 /**
4168  * xge_hal_device_intr_disable - Disable Xframe interrupts.
4169  * @hldev: HAL device handle.
 *
 * Disable Xframe interrupts.
4174  *
4175  * See also: xge_hal_device_intr_enable()
4176  */
4177 void
4178 xge_hal_device_intr_disable(xge_hal_device_t *hldev)
4179 {
4180 	xge_list_t *item;
4181 	xge_hal_pci_bar0_t *bar0;
4182 	u64 val64;
4183 
4184 	/*
4185 	 * Disable traffic only interrupts.
4186 	 * Tx traffic interrupts are used only if the TTI feature is
4187 	 * enabled.
4188 	 */
4189 	val64 = 0;
4190 	if (hldev->tti_enabled)
4191 		val64 = XGE_HAL_TX_TRAFFIC_INTR;
4192 
4193 	val64 |= XGE_HAL_RX_TRAFFIC_INTR |
4194 		 XGE_HAL_TX_PIC_INTR |
4195 		 XGE_HAL_MC_INTR |
4196 		 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
4197 						XGE_HAL_SCHED_INTR : 0);
4198 	__hal_device_intr_mgmt(hldev, val64, 0);
4199 
4200 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4201 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4202 	                     0xFFFFFFFFFFFFFFFFULL,
4203 			     &bar0->general_int_mask);
4204 
4205 
4206 	/* disable all configured PRCs */
4207 	xge_list_for_each(item, &hldev->ring_channels) {
4208 		xge_hal_channel_h channel;
4209 		channel = xge_container_of(item, xge_hal_channel_t, item);
4210 		__hal_ring_prc_disable(channel);
4211 	}
4212 
4213 	xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
4214 }
4215 
4216 
4217 /**
4218  * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
4219  * @hldev: HAL device handle.
4220  *
4221  * Enable Xframe multicast addresses.
4222  * Returns: XGE_HAL_OK on success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable the mcast
 * feature within the timeout period.
4225  *
4226  * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
4227  */
4228 xge_hal_status_e
4229 xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
4230 {
4231 	u64 val64;
4232 	xge_hal_pci_bar0_t *bar0;
4233 	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4234 
4235 	if (hldev == NULL)
4236 		return XGE_HAL_ERR_INVALID_DEVICE;
4237 
4238 	if (hldev->mcast_refcnt)
4239 		return XGE_HAL_OK;
4240 
4241 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4242 		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4243 
4244 	hldev->mcast_refcnt = 1;
4245 
4246 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4247 
4248 	/*  Enable all Multicast addresses */
4249 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4250 	      XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
4251 	      &bar0->rmac_addr_data0_mem);
4252 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4253 	      XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
4254 	      &bar0->rmac_addr_data1_mem);
4255 	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4256 		XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4257 		XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4258 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4259 		            &bar0->rmac_addr_cmd_mem);
4260 
4261 	if (__hal_device_register_poll(hldev,
4262 		&bar0->rmac_addr_cmd_mem, 0,
4263 		XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4264 		XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* the upper layer may need to retry */
4266 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4267 	}
4268 
4269 	return XGE_HAL_OK;
4270 }
4271 
4272 /**
4273  * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
4274  * @hldev: HAL device handle.
4275  *
4276  * Disable Xframe multicast addresses.
4277  * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable the mcast
 * feature within the timeout period.
4280  *
4281  * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
4282  */
4283 xge_hal_status_e
4284 xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
4285 {
4286 	u64 val64;
4287 	xge_hal_pci_bar0_t *bar0;
4288 	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4289 
4290 	if (hldev == NULL)
4291 		return XGE_HAL_ERR_INVALID_DEVICE;
4292 
4293 	if (hldev->mcast_refcnt == 0)
4294 		return XGE_HAL_OK;
4295 
4296 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4297 		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4298 
4299 	hldev->mcast_refcnt = 0;
4300 
4301 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4302 
4303 	/*  Disable all Multicast addresses */
4304 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4305 	       XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
4306 		       &bar0->rmac_addr_data0_mem);
4307 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4308 	       XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
4309 		       &bar0->rmac_addr_data1_mem);
4310 
4311 	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4312 		XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4313 		XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4314 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4315 		            &bar0->rmac_addr_cmd_mem);
4316 
4317 	if (__hal_device_register_poll(hldev,
4318 		&bar0->rmac_addr_cmd_mem, 0,
4319 		XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4320 		XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* the upper layer may need to retry */
4322 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4323 	}
4324 
4325 	return XGE_HAL_OK;
4326 }
4327 
4328 /**
4329  * xge_hal_device_promisc_enable - Enable promiscuous mode.
4330  * @hldev: HAL device handle.
4331  *
4332  * Enable promiscuous mode of Xframe operation.
4333  *
4334  * See also: xge_hal_device_promisc_disable().
4335  */
4336 void
4337 xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
4338 {
4339 	u64 val64;
4340 	xge_hal_pci_bar0_t *bar0;
4341 
4342 	xge_assert(hldev);
4343 
4344 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4345 
4346 	if (!hldev->is_promisc) {
4347 		/*  Put the NIC into promiscuous mode */
4348 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4349 		                            &bar0->mac_cfg);
4350 		val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
4351 
4352 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4353 			       XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4354 			       &bar0->rmac_cfg_key);
4355 
4356 		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4357 				      (u32)(val64 >> 32),
4358 				      &bar0->mac_cfg);
4359 
4360 		hldev->is_promisc = 1;
4361 		xge_debug_device(XGE_TRACE,
4362 			"mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
4363 			(unsigned long long)val64);
4364 	}
4365 }
4366 
4367 /**
4368  * xge_hal_device_promisc_disable - Disable promiscuous mode.
4369  * @hldev: HAL device handle.
4370  *
4371  * Disable promiscuous mode of Xframe operation.
4372  *
4373  * See also: xge_hal_device_promisc_enable().
4374  */
4375 void
4376 xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
4377 {
4378 	u64 val64;
4379 	xge_hal_pci_bar0_t *bar0;
4380 
4381 	xge_assert(hldev);
4382 
4383 	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4384 
4385 	if (hldev->is_promisc) {
4386 		/*  Remove the NIC from promiscuous mode */
4387 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4388 					    &bar0->mac_cfg);
4389 		val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
4390 
4391 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4392 			       XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4393 			       &bar0->rmac_cfg_key);
4394 
4395 		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4396 				      (u32)(val64 >> 32),
4397 				      &bar0->mac_cfg);
4398 
4399 		hldev->is_promisc = 0;
4400 		xge_debug_device(XGE_TRACE,
4401 			"mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
4402 			(unsigned long long)val64);
4403 	}
4404 }
4405 
4406 /**
4407  * xge_hal_device_macaddr_get - Get MAC addresses.
4408  * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES - 1.
4411  * @macaddr: MAC address. Returned by HAL.
4412  *
4413  * Retrieve one of the stored MAC addresses by reading non-volatile
4414  * memory on the chip.
4415  *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the MAC
 * address within the timeout period.
4421  * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4422  *
4423  * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
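 *
 * A read sketch for the default (index 0) address (my_hldev is a
 * hypothetical, initialized device handle):
 *
 *	macaddr_t mac;
 *
 *	if (xge_hal_device_macaddr_get(my_hldev, 0, &mac) == XGE_HAL_OK) {
 *		... mac[0]..mac[5] now hold the stored station address ...
 *	}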
4424  */
4425 xge_hal_status_e
4426 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
4427 			macaddr_t *macaddr)
4428 {
4429 	xge_hal_pci_bar0_t *bar0 =
4430 		(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4431 	u64 val64;
4432 	int i;
4433 
4434 	if (hldev == NULL) {
4435 		return XGE_HAL_ERR_INVALID_DEVICE;
4436 	}
4437 
4438 	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
4439 		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4440 	}
4441 
4442 #ifdef XGE_HAL_HERC_EMULATION
4443 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
4444 	                            &bar0->rmac_addr_data0_mem);
4445 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
4446 	                            &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
		XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
4450 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4451 	                     &bar0->rmac_addr_cmd_mem);
4452 
4453 		/* poll until done */
4454 	__hal_device_register_poll(hldev,
4455 		       &bar0->rmac_addr_cmd_mem, 0,
4456 		       XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
4457 		       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
4458 
4459 #endif
4460 
4461 	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4462 		  XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4463 		  XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4464 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4465 	                     &bar0->rmac_addr_cmd_mem);
4466 
4467 	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4468 		   XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4469 		   XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* the upper layer may need to retry */
4471 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4472 	}
4473 
4474 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4475 	                            &bar0->rmac_addr_data0_mem);
4476 	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4477 		(*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
4478 	}
4479 
4480 #ifdef XGE_HAL_HERC_EMULATION
4481 	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4482 		(*macaddr)[i] = (u8)0;
4483 	}
4484 	(*macaddr)[1] = (u8)1;
4485 
4486 #endif
4487 
4488 	return XGE_HAL_OK;
4489 }
4490 
4491 /**
4492  * xge_hal_device_macaddr_set - Set MAC address.
4493  * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES - 1.
4496  * @macaddr: New MAC address to configure.
4497  *
4498  * Configure one of the available MAC address "slots".
4499  *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new MAC
 * address within the timeout period.
4505  * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4506  *
4507  * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4508  */
4509 xge_hal_status_e
4510 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4511 			macaddr_t macaddr)
4512 {
4513 	xge_hal_pci_bar0_t *bar0 =
4514 		(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4515 	u64 val64, temp64;
4516 	int i;
4517 
4518 	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4519 		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4520 
4521 	temp64 = 0;
4522 	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4523 		temp64 |= macaddr[i];
4524 		temp64 <<= 8;
4525 	}
4526 	temp64 >>= 8;
4527 
4528 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4529 	                XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4530 		        &bar0->rmac_addr_data0_mem);
4531 
4532 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4533 	                XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4534 		        &bar0->rmac_addr_data1_mem);
4535 
4536 	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4537 		  XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4538 		  XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4539 
4540 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4541 	                     &bar0->rmac_addr_cmd_mem);
4542 
4543 	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4544 		   XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4545 		   XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* the upper layer may need to retry */
4547 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4548 	}
4549 
4550 	return XGE_HAL_OK;
4551 }
4552 
4553 /**
4554  * xge_hal_device_macaddr_find - Finds index in the rmac table.
4555  * @hldev: HAL device handle.
4556  * @wanted: Wanted MAC address.
 *
 * Returns: index of the matching entry, or -1 if the address is not found.
 *
 * See also: xge_hal_device_macaddr_set().
4559  */
4560 int
4561 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
4562 {
4563 	int i;
4564 
4565 	if (hldev == NULL) {
4566 		return XGE_HAL_ERR_INVALID_DEVICE;
4567 	}
4568 
4569 	for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
4570 		macaddr_t macaddr;
4571 		(void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
4572 		if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
4573 			return i;
4574 		}
4575 	}
4576 
4577 	return -1;
4578 }
4579 
4580 /**
4581  * xge_hal_device_mtu_set - Set MTU.
4582  * @hldev: HAL device handle.
4583  * @new_mtu: New MTU size to configure.
4584  *
 * Set new MTU value. For example, to use jumbo frames:
 * xge_hal_device_mtu_set(my_device, 9600);
4587  *
4588  * Returns: XGE_HAL_OK on success.
4589  * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
4590  * register.
4591  * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
4592  * schemes.
4593  * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
4594  * a "quiescent" state.
4595  */
4596 xge_hal_status_e
4597 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
4598 {
4599 	xge_hal_status_e status;
4600 
4601 	/*
4602 	 * reset needed if 1) new MTU differs, and
4603 	 * 2a) device was closed or
4604 	 * 2b) device is being upped for first time.
4605 	 */
4606 	if (hldev->config.mtu != new_mtu) {
4607 		if (hldev->reset_needed_after_close ||
4608 			!hldev->mtu_first_time_set) {
4609 			status = xge_hal_device_reset(hldev);
4610 			if (status != XGE_HAL_OK) {
4611 				xge_debug_device(XGE_TRACE, "%s",
4612 					  "fatal: can not reset the device");
4613 				return status;
4614 			}
4615 		}
4616 		/* store the new MTU in device, reset will use it */
4617 		hldev->config.mtu = new_mtu;
4618 		xge_debug_device(XGE_TRACE, "new MTU %d applied",
4619 				 new_mtu);
4620 	}
4621 
4622 	if (!hldev->mtu_first_time_set)
4623 		hldev->mtu_first_time_set = 1;
4624 
4625 	return XGE_HAL_OK;
4626 }
4627 
4628 /**
4629  * xge_hal_device_initialize - Initialize Xframe device.
4630  * @hldev: HAL device handle.
4631  * @attr: pointer to xge_hal_device_attr_t structure
4632  * @device_config: Configuration to be _applied_ to the device,
4633  *                 For the Xframe configuration "knobs" please
4634  *                 refer to xge_hal_device_config_t and Xframe
4635  *                 User Guide.
4636  *
 * Initialize the Xframe device. Note that all the arguments of this public
 * API are 'IN', including @hldev. The upper-layer driver (ULD) cooperates
 * with the OS to find a new Xframe device and locate its PCI and memory
 * spaces.
 *
 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for the HAL
 * to enable the latter to perform Xframe hardware initialization.
4643  *
4644  * Returns: XGE_HAL_OK - success.
4645  * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
4646  * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
4647  * valid.
4648  * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
4649  * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device MAC address is not valid.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the MAC
 * address within the timeout period, or TTI/RTI initialization failed.
 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Device is not quiescent.
4655  *
4656  * See also: xge_hal_device_terminate(), xge_hal_status_e{}
4657  * xge_hal_device_attr_t{}.
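 *
 * An attribute-filling sketch (everything on the right-hand side is a
 * hypothetical value the ULD obtains from the OS during PCI probe; the
 * fields shown are the ones consumed below):
 *
 *	xge_hal_device_attr_t attr;
 *
 *	attr.regh0 = my_bar0_reg_handle;
 *	attr.regh1 = my_bar1_reg_handle;
 *	attr.regh2 = my_bar2_reg_handle;
 *	attr.bar0  = my_mapped_bar0;
 *	attr.bar1  = my_mapped_bar1;
 *	attr.bar2  = my_mapped_bar2;
 *	attr.pdev  = my_pci_dev_handle;
 *	attr.irqh  = my_irq_handle;
 *	attr.cfgh  = my_pci_config_handle;
 *
 *	status = xge_hal_device_initialize(my_hldev, &attr, device_config);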
4658  */
4659 xge_hal_status_e
4660 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
4661 		xge_hal_device_config_t *device_config)
4662 {
4663 	int i;
4664 	xge_hal_status_e status;
4665 	xge_hal_channel_t *channel;
4666 	u16 subsys_device;
4667 	u16 subsys_vendor;
4668 	int total_dram_size, ring_auto_dram_cfg, left_dram_size;
4669 	int total_dram_size_max = 0;
4670 
4671 	xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
4672 			 (unsigned long long)(ulong_t)hldev);
4673 
4674 	/* sanity check */
4675 	if (g_xge_hal_driver == NULL ||
4676 	    !g_xge_hal_driver->is_initialized) {
4677 		return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
4678 	}
4679 
4680 	xge_os_memzero(hldev, sizeof(xge_hal_device_t));
4681 
4682 	/*
4683 	 * validate a common part of Xframe-I/II configuration
4684 	 * (and run check_card() later, once PCI inited - see below)
4685 	 */
4686 	status = __hal_device_config_check_common(device_config);
4687 	if (status != XGE_HAL_OK)
4688 		return status;
4689 
4690 	/* apply config */
4691 	xge_os_memcpy(&hldev->config, device_config,
4692                       sizeof(xge_hal_device_config_t));
4693 
4694 	/* save original attr */
4695 	xge_os_memcpy(&hldev->orig_attr, attr,
4696                       sizeof(xge_hal_device_attr_t));
4697 
4698 	/* initialize rxufca_intr_thres */
4699 	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4700 
4701 	hldev->regh0 = attr->regh0;
4702 	hldev->regh1 = attr->regh1;
4703 	hldev->regh2 = attr->regh2;
4704 	hldev->isrbar0 = hldev->bar0 = attr->bar0;
4705 	hldev->bar1 = attr->bar1;
4706 	hldev->bar2 = attr->bar2;
4707 	hldev->pdev = attr->pdev;
4708 	hldev->irqh = attr->irqh;
4709 	hldev->cfgh = attr->cfgh;
4710 
4711 	/* set initial bimodal timer for bimodal adaptive schema */
4712 	hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
4713 
4714 	hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
4715 				  g_xge_hal_driver->config.queue_size_initial,
4716 				  g_xge_hal_driver->config.queue_size_max,
4717 				  __hal_device_event_queued, hldev);
4718 	if (hldev->queueh == NULL)
4719 		return XGE_HAL_ERR_OUT_OF_MEMORY;
4720 
4721 	hldev->magic = XGE_HAL_MAGIC;
4722 
4723 	xge_assert(hldev->regh0);
4724 	xge_assert(hldev->regh1);
4725 	xge_assert(hldev->bar0);
4726 	xge_assert(hldev->bar1);
4727 	xge_assert(hldev->pdev);
4728 	xge_assert(hldev->irqh);
4729 	xge_assert(hldev->cfgh);
4730 
4731 	/* initialize some PCI/PCI-X fields of this PCI device. */
4732 	__hal_device_pci_init(hldev);
4733 
4734 	/*
	 * initialize lists to properly handle a potential
	 * terminate request
4737 	 */
4738 	xge_list_init(&hldev->free_channels);
4739 	xge_list_init(&hldev->fifo_channels);
4740 	xge_list_init(&hldev->ring_channels);
4741 #ifdef XGEHAL_RNIC
4742 	xge_list_init(&hldev->sq_channels);
4743 	xge_list_init(&hldev->hrq_channels);
4744 	xge_list_init(&hldev->hcq_channels);
4745 	xge_list_init(&hldev->lrq_channels);
4746 	xge_list_init(&hldev->lcq_channels);
4747 	xge_list_init(&hldev->umq_channels);
4748 	xge_list_init(&hldev->dmq_channels);
4749 #endif
4750 
4751 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
4752 		/* fixups for xena */
4753 		hldev->config.rth_en = 0;
4754 		hldev->config.rth_spdm_en = 0;
4755 		hldev->config.rts_mac_en = 0;
4756 		total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
4757 
4758 		status = __hal_device_config_check_xena(device_config);
4759 		if (status != XGE_HAL_OK) {
4760 			xge_hal_device_terminate(hldev);
4761 			return status;
4762 		}
4763 		if (hldev->config.bimodal_interrupts == 1) {
4764 			xge_hal_device_terminate(hldev);
4765 			return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
4766 		} else if (hldev->config.bimodal_interrupts ==
4767 		    XGE_HAL_DEFAULT_USE_HARDCODE)
4768 			hldev->config.bimodal_interrupts = 0;
4769 	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4770 		/* fixups for herc */
4771 		total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
4772 		status = __hal_device_config_check_herc(device_config);
4773 		if (status != XGE_HAL_OK) {
4774 			xge_hal_device_terminate(hldev);
4775 			return status;
4776 		}
4777 		if (hldev->config.bimodal_interrupts ==
4778 		    XGE_HAL_DEFAULT_USE_HARDCODE)
4779 			hldev->config.bimodal_interrupts = 1;
4780 	} else {
4781 		xge_debug_device(XGE_ERR,
4782 			  "detected unknown device_id 0x%x", hldev->device_id);
4783 		xge_hal_device_terminate(hldev);
4784 		return XGE_HAL_ERR_BAD_DEVICE_ID;
4785 	}
4786 
4787 #ifdef XGEHAL_RNIC
4788 
4789 	if(__hal_blockpool_create(hldev,&hldev->block_pool,
4790 		XGE_HAL_BLOCKPOOL_SIZE) != XGE_HAL_OK) {
4791 		xge_debug_device(XGE_ERR,
4792 				"block pool: __hal_blockpool_create failed");
4793 		xge_hal_device_terminate(hldev);
4794 		return XGE_HAL_ERR_OUT_OF_MEMORY;
4795 	}
4796 
4797 #endif
4798 
4799 	/* allocate and initialize FIFO types of channels according to
4800 	 * configuration */
4801 	for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
4802 		if (!device_config->fifo.queue[i].configured)
4803 			continue;
4804 
4805 		channel = __hal_channel_allocate(hldev, i,
4806 						 XGE_HAL_CHANNEL_TYPE_FIFO);
4807 		if (channel == NULL) {
4808 			xge_debug_device(XGE_ERR,
4809 				"fifo: __hal_channel_allocate failed");
4810 			xge_hal_device_terminate(hldev);
4811 			return XGE_HAL_ERR_OUT_OF_MEMORY;
4812 		}
4813 		/* add new channel to the device */
4814 		xge_list_insert(&channel->item, &hldev->free_channels);
4815 	}
4816 
4817 	/*
4818 	 * automatic DRAM adjustment
4819 	 */
4820 	total_dram_size = 0;
4821 	ring_auto_dram_cfg = 0;
4822 	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
4823 		if (!device_config->ring.queue[i].configured)
4824 			continue;
4825 		if (device_config->ring.queue[i].dram_size_mb ==
4826 		    XGE_HAL_DEFAULT_USE_HARDCODE) {
4827 			ring_auto_dram_cfg++;
4828 			continue;
4829 		}
4830 		total_dram_size += device_config->ring.queue[i].dram_size_mb;
4831 	}
4832 	left_dram_size = total_dram_size_max - total_dram_size;
4833 	if (left_dram_size < 0 ||
4834 	    (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0))  {
4835 		xge_debug_device(XGE_ERR,
4836 			 "ring config: exceeded DRAM size %d MB",
4837 			 total_dram_size_max);
4838 		xge_hal_device_terminate(hldev);
4839                 return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
4840         }
4841 
4842 	/*
4843 	 * allocate and initialize RING types of channels according to
4844 	 * configuration
4845 	 */
4846 	for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
4847 		if (!device_config->ring.queue[i].configured)
4848 			continue;
4849 
4850 		if (device_config->ring.queue[i].dram_size_mb ==
4851 		    XGE_HAL_DEFAULT_USE_HARDCODE) {
4852 			hldev->config.ring.queue[i].dram_size_mb =
4853 				device_config->ring.queue[i].dram_size_mb =
4854 					left_dram_size / ring_auto_dram_cfg;
4855 		}
4856 
4857 		channel = __hal_channel_allocate(hldev, i,
4858 					 XGE_HAL_CHANNEL_TYPE_RING);
4859 		if (channel == NULL) {
4860 			xge_debug_device(XGE_ERR,
4861 				"ring: __hal_channel_allocate failed");
4862 			xge_hal_device_terminate(hldev);
4863 			return XGE_HAL_ERR_OUT_OF_MEMORY;
4864 		}
4865 		/* add new channel to the device */
4866 		xge_list_insert(&channel->item, &hldev->free_channels);
4867 	}
4868 
4869 	/* get subsystem IDs */
4870 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4871 		xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
4872 		&subsys_device);
4873 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4874 		xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
4875 		&subsys_vendor);
4876 	xge_debug_device(XGE_TRACE,
4877                          "subsystem_id %04x:%04x",
4878                          subsys_vendor, subsys_device);
4879 
4880 	/* reset device initially */
4881 	(void) __hal_device_reset(hldev);
4882 
4883 	/* set host endian before, to assure proper action */
4884 	status = __hal_device_set_swapper(hldev);
4885 	if (status != XGE_HAL_OK) {
4886 		xge_debug_device(XGE_ERR,
4887 			"__hal_device_set_swapper failed");
4888 		xge_hal_device_terminate(hldev);
4889 		(void) __hal_device_reset(hldev);
4890 		return status;
4891 	}
4892 
4893 #ifndef XGE_HAL_HERC_EMULATION
4894 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4895 		__hal_device_xena_fix_mac(hldev);
4896 #endif
4897 
4898 	/*  MAC address initialization.
4899 	 *  For now only one mac address will be read and used.  */
4900 	status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
4901 	if (status != XGE_HAL_OK) {
4902 		xge_debug_device(XGE_ERR,
4903 			"xge_hal_device_macaddr_get failed");
4904 		xge_hal_device_terminate(hldev);
4905 		return status;
4906 	}
4907 
4908 	if (hldev->macaddr[0][0] == 0xFF &&
4909 	    hldev->macaddr[0][1] == 0xFF &&
4910 	    hldev->macaddr[0][2] == 0xFF &&
4911 	    hldev->macaddr[0][3] == 0xFF &&
4912 	    hldev->macaddr[0][4] == 0xFF &&
4913 	    hldev->macaddr[0][5] == 0xFF) {
4914 		xge_debug_device(XGE_ERR,
4915 			"xge_hal_device_macaddr_get returns all FFs");
4916 		xge_hal_device_terminate(hldev);
4917 		return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
4918 	}
4919 
4920 	xge_debug_device(XGE_TRACE,
4921 			  "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
4922 			  hldev->macaddr[0][0], hldev->macaddr[0][1],
4923 			  hldev->macaddr[0][2], hldev->macaddr[0][3],
4924 			  hldev->macaddr[0][4], hldev->macaddr[0][5]);
4925 
4926 	status = __hal_stats_initialize(&hldev->stats, hldev);
4927 	if (status != XGE_HAL_OK) {
4928 		xge_debug_device(XGE_ERR,
4929 			"__hal_stats_initialize failed");
4930 		xge_hal_device_terminate(hldev);
4931 		return status;
4932 	}
4933 
4934 	status = __hal_device_hw_initialize(hldev);
4935 	if (status != XGE_HAL_OK) {
4936 		xge_debug_device(XGE_ERR,
4937 			"__hal_device_hw_initialize failed");
4938 		xge_hal_device_terminate(hldev);
4939 		return status;
4940 	}
4941 	hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
	if (hldev->dump_buf == NULL)  {
		xge_debug_device(XGE_ERR,
			"dump buffer allocation (xge_os_malloc) failed");
		xge_hal_device_terminate(hldev);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}
4948 
4949 
4950 	/* Xena-only: need to serialize fifo posts across all device fifos */
4951 #if defined(XGE_HAL_TX_MULTI_POST)
4952 	xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
4953 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
4954 	xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
4955 #endif
4956 
4957 	hldev->is_initialized = 1;
4958 
4959 	return XGE_HAL_OK;
4960 }
4961 
4962 /**
4963  * xge_hal_device_terminating - Mark the device as 'terminating'.
4964  * @devh: HAL device handle.
4965  *
4966  * Mark the device as 'terminating', going to terminate. Can be used
4967  * to serialize termination with other running processes/contexts.
4968  *
4969  * See also: xge_hal_device_terminate().
4970  */
4971 void
4972 xge_hal_device_terminating(xge_hal_device_h devh)
4973 {
4974 	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
4975 	hldev->terminating = 1;
4976 }
4977 
4978 /**
4979  * xge_hal_device_terminate - Terminate Xframe device.
4980  * @hldev: HAL device handle.
4981  *
4982  * Terminate HAL device.
4983  *
4984  * See also: xge_hal_device_initialize().
4985  */
4986 void
4987 xge_hal_device_terminate(xge_hal_device_t *hldev)
4988 {
4989 	xge_assert(g_xge_hal_driver != NULL);
4990 	xge_assert(hldev != NULL);
4991 	xge_assert(hldev->magic == XGE_HAL_MAGIC);
4992 
4993 	xge_queue_flush(hldev->queueh);
4994 
4995 	hldev->terminating = 1;
4996 	hldev->is_initialized = 0;
4997         hldev->in_poll = 0;
4998 	hldev->magic = XGE_HAL_DEAD;
4999 
5000 #if defined(XGE_HAL_TX_MULTI_POST)
5001 	xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
5002 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5003 	xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
5004 #endif
5005 
5006 	xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
5007 				(unsigned long long)(ulong_t)hldev);
5008 
5009 	xge_assert(xge_list_is_empty(&hldev->fifo_channels));
5010 	xge_assert(xge_list_is_empty(&hldev->ring_channels));
5011 
5012 	if (hldev->stats.is_initialized) {
5013 		__hal_stats_terminate(&hldev->stats);
5014 	}
5015 
5016 	/* close if open and free all channels */
5017 	while (!xge_list_is_empty(&hldev->free_channels)) {
5018 		xge_hal_channel_t *channel = (xge_hal_channel_t*)
5019 					hldev->free_channels.next;
5020 
5021 		xge_assert(!channel->is_open);
5022 		xge_list_remove(&channel->item);
5023 		__hal_channel_free(channel);
5024 	}
5025 
5026 	if (hldev->queueh) {
5027 		xge_queue_destroy(hldev->queueh);
5028 	}
5029 
5030 	if (hldev->spdm_table) {
5031 		xge_os_free(hldev->pdev,
5032 			  hldev->spdm_table[0],
5033 			  (sizeof(xge_hal_spdm_entry_t) *
5034 				hldev->spdm_max_entries));
5035 		xge_os_free(hldev->pdev,
5036 			  hldev->spdm_table,
5037 			  (sizeof(xge_hal_spdm_entry_t *) *
5038 				hldev->spdm_max_entries));
5039 		xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
5040 		hldev->spdm_table = NULL;
5041 	}
5042 
5043 	if (hldev->dump_buf)  {
5044 	        xge_os_free(hldev->pdev, hldev->dump_buf,
5045 			    XGE_HAL_DUMP_BUF_SIZE);
5046 		hldev->dump_buf = NULL;
5047 	}
5048 
5049 }
5050 
5051 /**
5052  * xge_hal_device_handle_tcode - Handle transfer code.
5053  * @channelh: Channel handle.
5054  * @dtrh: Descriptor handle.
5055  * @t_code: One of the enumerated (and documented in the Xframe user guide)
5056  *          "transfer codes".
5057  *
5058  * Handle descriptor's transfer code. The latter comes with each completed
5059  * descriptor, see xge_hal_fifo_dtr_next_completed() and
5060  * xge_hal_ring_dtr_next_completed().
5061  * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
5062  *
5063  * Returns: one of the xge_hal_status_e{} enumerated types.
5064  * XGE_HAL_OK			- for success.
 * XGE_HAL_ERR_CRITICAL         - when a critical error is encountered.
5066  */
5067 xge_hal_status_e
5068 xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
5069 			     xge_hal_dtr_h dtrh, u8 t_code)
5070 {
5071 	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5072 	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
5073 
5074 	if (t_code > 15) {
5075 		xge_os_printf("invalid t_code %d", t_code);
5076 		return XGE_HAL_OK;
5077 	}
5078 
5079 	if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
5080 	        hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;
5081 
5082 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5083         xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
5084         xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
5085 		XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
5086 		txdp->control_1, txdp->control_2, txdp->buffer_pointer,
5087 		txdp->host_control);
5088 #endif
5089 
5090 		/* handle link "down" immediately without going through
5091 		 * xge_hal_device_poll() routine. */
5092 		if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
5093 			/* link is down */
5094 			if (hldev->link_state != XGE_HAL_LINK_DOWN) {
5095 				xge_hal_pci_bar0_t *bar0 =
5096 				(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5097 				u64 val64;
5098 
5099 				hldev->link_state = XGE_HAL_LINK_DOWN;
5100 
5101 				val64 = xge_os_pio_mem_read64(hldev->pdev,
5102 				    hldev->regh0, &bar0->adapter_control);
5103 
5104 				/* turn off LED */
5105 				val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
5106 				xge_os_pio_mem_write64(hldev->pdev,
5107 						hldev->regh0, val64,
5108 						&bar0->adapter_control);
5109 
5110 				g_xge_hal_driver->uld_callbacks.link_down(
5111 						hldev->upper_layer_info);
5112 			}
5113 		} else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
5114 		           t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
5115                         __hal_device_handle_targetabort(hldev);
5116 			return XGE_HAL_ERR_CRITICAL;
5117 		}
5118 	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5119 	        hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;
5120 
5121 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5122 		xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
5123 		xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
5124 			":"XGE_OS_LLXFMT, rxdp->control_1,
5125 			rxdp->control_2, rxdp->buffer0_ptr,
5126 			rxdp->host_control);
5127 #endif
5128 		if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
5129 			hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
5130 			__hal_device_handle_eccerr(hldev, "rxd_t_code",
5131 						   (u64)t_code);
5132 			return XGE_HAL_ERR_CRITICAL;
5133 		} else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
5134 			   t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
5135 			hldev->stats.sw_dev_err_stats.parity_err_cnt++;
5136 			__hal_device_handle_parityerr(hldev, "rxd_t_code",
5137 						      (u64)t_code);
5138 			return XGE_HAL_ERR_CRITICAL;
5139 		}
5140 	}
5141 	return XGE_HAL_OK;
5142 }
5143 
5144 /**
5145  * xge_hal_device_link_state - Get link state.
5146  * @devh: HAL device handle.
5147  * @ls: Link state, see xge_hal_device_link_state_e{}.
5148  *
5149  * Get link state.
5150  * Returns: XGE_HAL_OK.
5151  * See also: xge_hal_device_link_state_e{}.
5152  */
5153 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
5154 			xge_hal_device_link_state_e *ls)
5155 {
5156 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5157 
5158 	xge_assert(ls != NULL);
5159 	*ls = hldev->link_state;
5160 	return XGE_HAL_OK;
5161 }
5162 
5163 /**
5164  * xge_hal_device_sched_timer - Configure scheduled device interrupt.
5165  * @devh: HAL device handle.
 * @interval_us: Time interval, in microseconds.
5167  *            Unlike transmit and receive interrupts,
5168  *            the scheduled interrupt is generated independently of
5169  *            traffic, but purely based on time.
5170  * @one_shot: 1 - generate scheduled interrupt only once.
5171  *            0 - generate scheduled interrupt periodically at the specified
5172  *            @interval_us interval.
5173  *
 * (Re-)configure the scheduled interrupt. Can be called at runtime to change
 * the setting, to generate one-shot interrupts based on resource and/or
 * traffic conditions, or for other purposes.
5177  * See also: xge_hal_device_config_t{}.
5178  */
5179 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
5180 			int one_shot)
5181 {
5182 	u64 val64;
5183 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5184 	xge_hal_pci_bar0_t *bar0 =
5185 		(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5186 	unsigned int interval = hldev->config.pci_freq_mherz * interval_us;
5187 
5188 	interval = __hal_fix_time_ival_herc(hldev, interval);
5189 
5190 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5191 				    &bar0->scheduled_int_ctrl);
5192 	if (interval) {
5193 		val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
5194 		val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
5195 		if (one_shot) {
5196 			val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
5197 		}
5198 		val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5199 	} else {
5200 		val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5201 	}
5202 
5203 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5204 			     val64, &bar0->scheduled_int_ctrl);
5205 
5206 	xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
5207 			  (unsigned long long)val64,
5208 			  interval ? "enabled" : "disabled");
5209 }
5210 
5211 /**
5212  * xge_hal_device_check_id - Verify device ID.
5213  * @devh: HAL device handle.
5214  *
5215  * Verify device ID.
5216  * Returns: one of the xge_hal_card_e{} enumerated types.
5217  * See also: xge_hal_card_e{}.
5218  */
5219 xge_hal_card_e
5220 xge_hal_device_check_id(xge_hal_device_h devh)
5221 {
5222 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5223 	switch (hldev->device_id) {
5224 	case XGE_PCI_DEVICE_ID_XENA_1:
5225 	case XGE_PCI_DEVICE_ID_XENA_2:
5226 		return XGE_HAL_CARD_XENA;
5227 	case XGE_PCI_DEVICE_ID_HERC_1:
5228 	case XGE_PCI_DEVICE_ID_HERC_2:
5229 		return XGE_HAL_CARD_HERC;
5230 	default:
5231 		return XGE_HAL_CARD_UNKNOWN;
5232 	}
5233 }
5234 
5235 /**
 * xge_hal_device_pci_info_get - Get PCI bus information such as width,
5237  *			 frequency, and mode from previously stored values.
5238  * @devh:		HAL device handle.
5239  * @pci_mode:		pointer to a variable of enumerated type
5240  *			xge_hal_pci_mode_e{}.
5241  * @bus_frequency:	pointer to a variable of enumerated type
5242  *			xge_hal_pci_bus_frequency_e{}.
5243  * @bus_width:		pointer to a variable of enumerated type
5244  *			xge_hal_pci_bus_width_e{}.
5245  *
5246  * Get pci mode, frequency, and PCI bus width.
5247  * Returns: one of the xge_hal_status_e{} enumerated types.
5248  * XGE_HAL_OK			- for success.
5249  * XGE_HAL_ERR_INVALID_DEVICE	- for invalid device handle.
 * See also: xge_hal_pci_mode_e{}, xge_hal_pci_bus_frequency_e{},
 * xge_hal_pci_bus_width_e{}.
5251  */
5252 xge_hal_status_e
5253 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
5254 		xge_hal_pci_bus_frequency_e *bus_frequency,
5255 		xge_hal_pci_bus_width_e *bus_width)
5256 {
5257 	xge_hal_status_e rc_status;
5258 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5259 
5260 	if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
5261 		rc_status =  XGE_HAL_ERR_INVALID_DEVICE;
5262 		xge_debug_device(XGE_ERR,
5263 		        "xge_hal_device_pci_info_get error, rc %d for device %p",
5264 			rc_status, hldev);
5265 
5266 		return rc_status;
5267 	}
5268 
5269 	*pci_mode	= hldev->pci_mode;
5270 	*bus_frequency	= hldev->bus_frequency;
5271 	*bus_width	= hldev->bus_width;
5272 	rc_status	= XGE_HAL_OK;
5273 	return rc_status;
5274 }
5275 
5276 /**
5277  * xge_hal_reinitialize_hw
5278  * @hldev: private member of the device structure.
5279  *
 * This function will soft reset the NIC and re-initialize all the
 * I/O registers to the values they had after its initial initialization
5282  * through the probe function.
5283  */
5284 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
5285 {
5286 	(void) xge_hal_device_reset(hldev);
5287 	if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
5288 		xge_hal_device_terminate(hldev);
5289 		(void) __hal_device_reset(hldev);
5290 		return 1;
5291 	}
5292 	return 0;
5293 }
5294 
5295 
5296 /*
5297  * __hal_read_spdm_entry_line
5298  * @hldev: pointer to xge_hal_device_t structure
5299  * @spdm_line: spdm line in the spdm entry to be read.
5300  * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
5301  * @spdm_line_val: Contains the value stored in the spdm line.
5302  *
 * The SPDM table contains up to a maximum of 256 spdm entries.
5304  * Each spdm entry contains 8 lines and each line stores 8 bytes.
5305  * This function reads the spdm line(addressed by @spdm_line)
5306  * of the spdm entry(addressed by @spdm_entry) in
5307  * the SPDM table.
5308  */
5309 xge_hal_status_e
5310 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
5311 			u16 spdm_entry, u64 *spdm_line_val)
5312 {
5313 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5314 	u64 val64;
5315 
5316 	val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
5317 		XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
5318 		XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);
5319 
5320 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5321 			&bar0->rts_rth_spdm_mem_ctrl);
5322 
5323 	/* poll until done */
5324 	if (__hal_device_register_poll(hldev,
5325 		&bar0->rts_rth_spdm_mem_ctrl, 0,
5326 		XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
5327 		XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
5328 
5329 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5330 	}
5331 
5332 	*spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
5333 				hldev->regh0, &bar0->rts_rth_spdm_mem_data);
5334 	return XGE_HAL_OK;
5335 }
5336 
5337 
5338 /*
5339  * __hal_get_free_spdm_entry
5340  * @hldev: pointer to xge_hal_device_t structure
5341  * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
5342  *
5343  * This function returns an index of unused spdm entry in the SPDM
5344  * table.
5345  */
5346 static xge_hal_status_e
5347 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
5348 {
5349 	xge_hal_status_e status;
5350 	u64 spdm_line_val=0;
5351 
5352 	/*
5353 	 * Search in the local SPDM table for a free slot.
5354 	 */
5355 	*spdm_entry = 0;
5356 	for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
		if (!hldev->spdm_table[*spdm_entry]->in_use) {
5358 			break;
5359 		}
5360 	}
5361 
5362 	if (*spdm_entry >= hldev->spdm_max_entries) {
5363 		return XGE_HAL_ERR_SPDM_TABLE_FULL;
5364 	}
5365 
5366 	/*
5367 	 * Make sure that the corresponding spdm entry in the SPDM
5368 	 * table is free.
5369 	 * Seventh line of the spdm entry contains information about
5370 	 * whether the entry is free or not.
5371 	 */
5372 	if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
5373 					&spdm_line_val)) != XGE_HAL_OK) {
5374 		return status;
5375 	}
5376 
5377 	/* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
5378 	if ((spdm_line_val & BIT(63))) {
5379 		/*
5380 		 * Log a warning
5381 		 */
5382 		xge_debug_device(XGE_ERR, "Local SPDM table is not "
5383 			  "consistent with the actual one for the spdm "
5384 			  "entry %d\n", *spdm_entry);
5385 		return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
5386 	}
5387 
5388 	return XGE_HAL_OK;
5389 }
5390 
5391 
5392 
5393 /**
5394  * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
5395  * @devh: HAL device handle.
5396  * @src_ip: Source ip address(IPv4/IPv6).
5397  * @dst_ip: Destination ip address(IPv4/IPv6).
5398  * @l4_sp: L4 source port.
5399  * @l4_dp: L4 destination port.
5400  * @is_tcp: Set to 1, if the protocol is TCP.
5401  *		   0, if the protocol is UDP.
5402  * @is_ipv4: Set to 1, if the protocol is IPv4.
5403  *		   0, if the protocol is IPv6.
5404  * @tgt_queue: Target queue to route the receive packet.
5405  *
 * This function adds a new entry to the SPDM table.
5407  *
5408  * Returns:  XGE_HAL_OK - success.
5409  * XGE_HAL_ERR_SPDM_NOT_ENABLED -  SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry within
 *					the timeout.
5412  * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
5413  * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
5414  *
5415  * See also: xge_hal_spdm_entry_remove{}.
5416  */
5417 xge_hal_status_e
5418 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
5419 		xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
5420 		u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
5421 {
5422 
5423 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5424 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5425 	u32 jhash_value;
5426 	u32 jhash_init_val;
5427 	u32 jhash_golden_ratio;
5428 	u64 val64;
5429 	int off;
5430 	u16 spdm_entry;
5431 	u8  msg[XGE_HAL_JHASH_MSG_LEN];
5432 	int ipaddr_len;
5433 	xge_hal_status_e status;
5434 
5435 
5436 	if (!hldev->config.rth_spdm_en) {
5437 		return XGE_HAL_ERR_SPDM_NOT_ENABLED;
5438 	}
5439 
5440 	if ((tgt_queue <  XGE_HAL_MIN_RING_NUM) ||
5441 		(tgt_queue  >  XGE_HAL_MAX_RING_NUM)) {
5442 		return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
5443 	}
5444 
5445 
5446 	/*
5447 	 * Calculate the jenkins hash.
5448 	 */
5449 	/*
5450 	 * Create the Jenkins hash algorithm key.
5451 	 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
	 * use L4 information. Otherwise key = {L3SA, L3DA}.
5453 	 */
5454 
5455 	if (is_ipv4) {
		ipaddr_len = 4;   /* in bytes */
5457 	} else {
5458 		ipaddr_len = 16;
5459 	}
5460 
5461 	/*
5462 	 * Jenkins hash algorithm expects the key in the big endian
	 * format. Since the key is a byte array, memcpy won't work in the
	 * little endian case. So, the code below extracts each
	 * byte starting from the MSB and stores it in the key.
5466 	 */
5467 	if (is_ipv4) {
5468 		for (off = 0; off < ipaddr_len; off++) {
5469 			u32 mask = vBIT32(0xff,(off*8),8);
5470 			int shift = 32-(off+1)*8;
5471 			msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
5472 			msg[off+ipaddr_len] =
5473 				(u8)((dst_ip->ipv4.addr & mask) >> shift);
5474 		}
5475 	} else {
5476 		for (off = 0; off < ipaddr_len; off++) {
5477 			int loc = off % 8;
5478 			u64 mask = vBIT(0xff,(loc*8),8);
5479 			int shift = 64-(loc+1)*8;
5480 
5481 			msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
5482 						>> shift);
5483 			msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
5484 						    & mask) >> shift);
5485 		}
5486 	}
5487 
5488 	off = (2*ipaddr_len);
5489 
5490 	if (hldev->config.rth_spdm_use_l4) {
5491 		msg[off] = (u8)((l4_sp & 0xff00) >> 8);
5492 		msg[off + 1] = (u8)(l4_sp & 0xff);
5493 		msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
5494 		msg[off + 3] = (u8)(l4_dp & 0xff);
5495 		off += 4;
5496 	}
5497 
5498 	/*
5499 	 * Calculate jenkins hash for this configuration
5500 	 */
5501 	val64 = xge_os_pio_mem_read64(hldev->pdev,
5502 				    hldev->regh0,
5503 				    &bar0->rts_rth_jhash_cfg);
5504 	jhash_golden_ratio = (u32)(val64 >> 32);
5505 	jhash_init_val = (u32)(val64 & 0xffffffff);
5506 
5507 	jhash_value = __hal_calc_jhash(msg, off,
5508 				       jhash_golden_ratio,
5509 				       jhash_init_val);
5510 
5511 	xge_os_spin_lock(&hldev->spdm_lock);
5512 
5513 	/*
	 * Locate a free slot in the SPDM table. To avoid a search in the
	 * actual SPDM table, which is very expensive in terms of time,
	 * we maintain a local copy of the table and perform the search for
	 * a free entry in the local table.
5518 	 */
5519 	if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
5520 			!= XGE_HAL_OK) {
5521 		xge_os_spin_unlock(&hldev->spdm_lock);
5522 		return status;
5523 	}
5524 
5525 	/*
5526 	 * Add this entry to the SPDM table
5527 	 */
5528 	status =  __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
5529 				     is_tcp, is_ipv4, tgt_queue,
5530 				     jhash_value, /* calculated jhash */
5531 				     spdm_entry);
5532 
5533 	xge_os_spin_unlock(&hldev->spdm_lock);
5534 
5535 	return status;
5536 }
5537 
5538 /**
5539  * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
5540  * @devh: HAL device handle.
5541  * @src_ip: Source ip address(IPv4/IPv6).
5542  * @dst_ip: Destination ip address(IPv4/IPv6).
5543  * @l4_sp: L4 source port.
5544  * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *		   0, if the protocol is UDP.
5547  * @is_ipv4: Set to 1, if the protocol is IPv4.
5548  *		   0, if the protocol is IPv6.
5549  *
 * This function removes an entry from the SPDM table.
5551  *
5552  * Returns:  XGE_HAL_OK - success.
5553  * XGE_HAL_ERR_SPDM_NOT_ENABLED -  SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry within
 *					the timeout.
5556  * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
5557  *					table.
5558  *
5559  * See also: xge_hal_spdm_entry_add{}.
5560  */
5561 xge_hal_status_e
5562 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
5563 		xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
5564 		u8 is_tcp, u8 is_ipv4)
5565 {
5566 
5567 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5568 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5569 	u64 val64;
5570 	u16 spdm_entry;
5571 	xge_hal_status_e status;
5572 	u64 spdm_line_arr[8];
5573 	u8 line_no;
5574 	u8 spdm_is_tcp;
5575 	u8 spdm_is_ipv4;
5576 	u16 spdm_l4_sp;
5577 	u16 spdm_l4_dp;
5578 
5579 	if (!hldev->config.rth_spdm_en) {
5580 		return XGE_HAL_ERR_SPDM_NOT_ENABLED;
5581 	}
5582 
5583 	xge_os_spin_lock(&hldev->spdm_lock);
5584 
5585 	/*
5586 	 * Poll the rxpic_int_reg register until spdm ready bit is set or
5587 	 * timeout happens.
5588 	 */
5589 	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
5590 			XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
5591 			XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
5592 
5593 		/* upper layer may require to repeat */
5594 		xge_os_spin_unlock(&hldev->spdm_lock);
5595 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5596 	}
5597 
5598 	/*
5599 	 * Clear the SPDM READY bit.
5600 	 */
5601 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5602                                &bar0->rxpic_int_reg);
5603 	val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
5604 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5605                       &bar0->rxpic_int_reg);
5606 
5607 	/*
5608 	 * Search in the local SPDM table to get the index of the
5609 	 * corresponding entry in the SPDM table.
5610 	 */
5611 	spdm_entry = 0;
5612 	for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
5613 		if ((!hldev->spdm_table[spdm_entry]->in_use) ||
5614 		    (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
5615 		    (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
5616 		    (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
5617 		    (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
5618 			continue;
5619 		}
5620 
5621 		/*
5622 		 * Compare the src/dst IP addresses of source and target
5623 		 */
5624 		if (is_ipv4) {
5625 			if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
5626 			     != src_ip->ipv4.addr) ||
5627 			    (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
5628 			     != dst_ip->ipv4.addr)) {
5629 				continue;
5630 			}
5631 		} else {
5632 			if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
5633 			     != src_ip->ipv6.addr[0]) ||
5634 			    (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
5635 			     != src_ip->ipv6.addr[1]) ||
5636 			    (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
5637 			     != dst_ip->ipv6.addr[0]) ||
5638 			    (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
5639 			     != dst_ip->ipv6.addr[1])) {
5640 				continue;
5641 			}
5642 		}
5643 		break;
5644 	}
5645 
5646 	if (spdm_entry >= hldev->spdm_max_entries) {
5647 		xge_os_spin_unlock(&hldev->spdm_lock);
5648 		return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
5649 	}
5650 
5651 	/*
5652 	 * Retrieve the corresponding entry from the SPDM table and
5653 	 * make sure that the data is consistent.
5654 	 */
5655 	for(line_no = 0; line_no < 8; line_no++) {
5656 
5657 		/*
5658 		 *  SPDM line 2,3,4 are valid only for IPv6 entry.
5659 		 *  SPDM line 5 & 6 are reserved. We don't have to
5660 		 *  read these entries in the above cases.
5661 		 */
5662 		if (((is_ipv4) &&
5663 			((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
5664 		     (line_no == 5) ||
5665 		     (line_no == 6)) {
5666 			continue;
5667 		}
5668 
5669 		if ((status = __hal_read_spdm_entry_line(
5670 					hldev,
5671 					line_no,
5672 					spdm_entry,
5673 					&spdm_line_arr[line_no]))
5674 							!= XGE_HAL_OK) {
5675 			xge_os_spin_unlock(&hldev->spdm_lock);
5676 			return status;
5677 		}
5678 	}
5679 
5680 	/*
5681 	 * Seventh line of the spdm entry contains the entry_enable
5682 	 * bit. Make sure that the entry_enable bit of this spdm entry
5683 	 * is set.
5684 	 * To remove an entry from the SPDM table, reset this
5685 	 * bit.
5686 	 */
5687 	if (!(spdm_line_arr[7] & BIT(63))) {
5688 		/*
5689 		 * Log a warning
5690 		 */
5691 		xge_debug_device(XGE_ERR, "Local SPDM table is not "
5692 			"consistent with the actual one for the spdm "
5693 			"entry %d \n", spdm_entry);
5694 		goto err_exit;
5695 	}
5696 
5697 	/*
	 *  Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
	 *  table and do a comparison.
5700 	 */
5701 	spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
5702 	spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
5703 	spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
5704 	spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);
5705 
5706 
5707 	if ((spdm_is_tcp != is_tcp) ||
5708 	    (spdm_is_ipv4 != is_ipv4) ||
5709 	    (spdm_l4_sp != l4_sp) ||
5710 	    (spdm_l4_dp != l4_dp)) {
5711 		/*
5712 		 * Log a warning
5713 		 */
5714 		xge_debug_device(XGE_ERR, "Local SPDM table is not "
5715 			"consistent with the actual one for the spdm "
5716 			"entry %d \n", spdm_entry);
5717 		goto err_exit;
5718 	}
5719 
5720 	if (is_ipv4) {
5721 		/* Upper 32 bits of spdm_line(64 bit) contains the
5722 		 * src IPv4 address. Lower 32 bits of spdm_line
5723 		 * contains the destination IPv4 address.
5724 		 */
5725 		u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
5726 		u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);
5727 
5728 		if ((temp_src_ip != src_ip->ipv4.addr) ||
5729 		    (temp_dst_ip != dst_ip->ipv4.addr)) {
5730 			xge_debug_device(XGE_ERR, "Local SPDM table is not "
5731 				"consistent with the actual one for the spdm "
5732 				"entry %d \n", spdm_entry);
5733 			goto err_exit;
5734 		}
5735 
5736 	} else {
5737 		/*
5738 		 * SPDM line 1 & 2 contains the src IPv6 address.
5739 		 * SPDM line 3 & 4 contains the dst IPv6 address.
5740 		 */
5741 		if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
5742 		    (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
5743 		    (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
5744 		    (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
5745 
5746 			/*
5747 			 * Log a warning
5748 			 */
5749 			xge_debug_device(XGE_ERR, "Local SPDM table is not "
5750 				"consistent with the actual one for the spdm "
5751 				"entry %d \n", spdm_entry);
5752 			goto err_exit;
5753 		}
5754 	}
5755 
5756 	/*
5757 	 * Reset the entry_enable bit to zero
5758 	 */
5759 	spdm_line_arr[7] &= ~BIT(63);
5760 
5761 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5762 		spdm_line_arr[7],
5763 		(void *)((char *)hldev->spdm_mem_base +
5764 		(spdm_entry * 64) + (7 * 8)));
5765 
5766 	/*
5767 	 * Wait for the operation to be completed.
5768 	 */
5769 	if (__hal_device_register_poll(hldev,
5770 		&bar0->rxpic_int_reg, 1,
5771 		XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
5772 		XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
5773 		xge_os_spin_unlock(&hldev->spdm_lock);
5774 		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5775 	}
5776 
5777 	/*
5778 	 * Make the corresponding spdm entry in the local SPDM table
5779 	 * available for future use.
5780 	 */
5781 	hldev->spdm_table[spdm_entry]->in_use = 0;
5782 	xge_os_spin_unlock(&hldev->spdm_lock);
5783 
5784 	return XGE_HAL_OK;
5785 
5786 err_exit:
5787 	xge_os_spin_unlock(&hldev->spdm_lock);
5788 	return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
5789 }
5790 
5791 /*
5792  * __hal_calc_jhash - Calculate Jenkins hash.
5793  * @msg: Jenkins hash algorithm key.
5794  * @length: Length of the key.
5795  * @golden_ratio: Jenkins hash golden ratio.
5796  * @init_value: Jenkins hash initial value.
5797  *
5798  * This function implements the Jenkins based algorithm used for the
5799  * calculation of the RTH hash.
5800  * Returns:  Jenkins hash value.
5801  *
5802  */
5803 u32 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
5804 {
5805 
5806 	register u32 a,b,c,len;
5807 
5808 	/*
5809 	 * Set up the internal state
5810 	 */
5811 	len = length;
5812 	a = b = golden_ratio;  /* the golden ratio; an arbitrary value */
5813 	c = init_value;         /* the previous hash value */
5814 
5815 	/*  handle most of the key */
5816 	while (len >= 12)
5817 	{
5818 		a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
5819 						 + ((u32)msg[3]<<24));
5820 		b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
5821 						 + ((u32)msg[7]<<24));
5822 		c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
5823 						 + ((u32)msg[11]<<24));
5824 		mix(a,b,c);
5825 		msg += 12; len -= 12;
5826 	}
5827 
5828 	/*  handle the last 11 bytes */
5829 	c += length;
	switch(len)  /* note: unlike the reference Jenkins code, cases here do not fall through */
5831 	{
5832 		case 11: c+= ((u32)msg[10]<<24);
5833 			 break;
5834 		case 10: c+= ((u32)msg[9]<<16);
5835 			 break;
5836 		case 9 : c+= ((u32)msg[8]<<8);
5837 			 break;
5838 		/* the first byte of c is reserved for the length */
5839 		case 8 : b+= ((u32)msg[7]<<24);
5840 			 break;
5841 		case 7 : b+= ((u32)msg[6]<<16);
5842 			 break;
5843 		case 6 : b+= ((u32)msg[5]<<8);
5844 			 break;
5845 		case 5 : b+= msg[4];
5846 			 break;
5847 		case 4 : a+= ((u32)msg[3]<<24);
5848 			 break;
5849 		case 3 : a+= ((u32)msg[2]<<16);
5850 			 break;
5851 		case 2 : a+= ((u32)msg[1]<<8);
5852 			 break;
5853 		case 1 : a+= msg[0];
5854 			 break;
5855 		/* case 0: nothing left to add */
5856 	}
5857 
5858 	mix(a,b,c);
5859 
5860 	/* report the result */
5861 	return c;
5862 }
5863 
5864 #if defined(XGE_HAL_MSI) | defined(XGE_HAL_MSI_X)
5865 /*
5866  * __hal_device_rti_set
 * @ring_qid: The post_qid of the ring.
5868  * @channel: HAL channel of the ring.
5869  *
5870  * This function stores the RTI value associated for the MSI and
5871  * also unmasks this particular RTI in the rti_mask register.
5872  */
5873 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
5874 {
5875 	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
5876 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5877 	u64 val64;
5878 
5879 #if defined(XGE_HAL_MSI)
5880 	channel->rti = (u8)ring_qid;
5881 #endif
5882 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5883 			    &bar0->rx_traffic_mask);
5884 	val64 &= ~BIT(ring_qid);
5885 	xge_os_pio_mem_write64(hldev->pdev,
5886 			    hldev->regh0, val64,
5887 			    &bar0->rx_traffic_mask);
5888 }
5889 
5890 /*
5891  * __hal_device_tti_set
 * @fifo_qid: The post_qid of the FIFO.
 * @channel: HAL channel of the FIFO.
5894  *
5895  * This function stores the TTI value associated for the MSI and
5896  * also unmasks this particular TTI in the tti_mask register.
5897  */
5898 static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
5899 {
5900 	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
5901 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5902 	u64 val64;
5903 
5904 #if defined(XGE_HAL_MSI)
5905 	channel->tti = (u8)fifo_qid;
5906 #endif
5907 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5908 			    &bar0->tx_traffic_mask);
5909 	val64 &= ~BIT(fifo_qid);
5910 	xge_os_pio_mem_write64(hldev->pdev,
5911 			    hldev->regh0, val64,
5912 			    &bar0->tx_traffic_mask);
5913 }
5914 #endif
5915 
5916 #if defined(XGE_HAL_MSI)
5917 /**
5918  * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
5919  * FIFO for a given MSI.
5920  * @channelh: HAL channel handle.
5921  * @msi: MSI Number associated with the channel.
5922  * @msi_msg: The MSI message associated with the MSI number above.
5923  *
5924  * This API will associate a given channel (either Ring or FIFO) with the
 * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in the
5926  * hardware to indicate this association to the hardware.
5927  */
5928 xge_hal_status_e
5929 xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
5930 {
5931 	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5932 	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
5933 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5934 	u64 val64;
5935 
5936 	channel->msi_msg = msi_msg;
5937 	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5938 		int ring = channel->post_qid;
5939 		xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
5940 				" MSI: %d\n", channel->msi_msg, ring, msi);
5941 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5942 			&bar0->rx_mat);
5943 		val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
5944 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5945 			&bar0->rx_mat);
5946 		__hal_device_rti_set(ring, channel);
5947 	} else {
5948 		int fifo = channel->post_qid;
5949 		xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
5950 				" MSI: %d\n", channel->msi_msg, fifo, msi);
5951 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5952 			&bar0->tx_mat[0]);
5953 		val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
5954 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5955 			&bar0->tx_mat[0]);
5956 		__hal_device_tti_set(fifo, channel);
5957 	}
5958 
5959 	 return XGE_HAL_OK;
5960 }
5961 #endif
5962 #if defined(XGE_HAL_MSI_X)
5963 /*
5964  * __hal_set_xmsi_vals
5965  * @devh: HAL device handle.
5966  * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
5967  *              Filled in by this function.
5968  * @msix_address: 32bit MSI-X DMA address.
5969  *              Filled in by this function.
5970  * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
5971  *            entry in the table of MSI-X (value, address) pairs.
5972  *
 * This function reads back from the hardware the MSI-X (address, value)
 * pair associated with the specified MSI-X index.
5975  */
5976 static void __hal_set_xmsi_vals (xge_hal_device_h devh,
5977 				 u32 *msix_value,
5978 				 u64 *msix_addr,
5979 				 int msix_idx)
5980 {
5981 	int cnt = 0;
5982 
5983 	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5984 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
5985 	u64 val64;
5986 
5987 	val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
5988 	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
5989 			(u32)(val64 >> 32), &bar0->xmsi_access);
5990 	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
5991 				   (u32)(val64), &bar0->xmsi_access);
5992 	do {
5993 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5994 					    &bar0->xmsi_access);
5995 		if (val64 & XGE_HAL_XMSI_STROBE)
5996 			break;
5997 		cnt++;
5998 		xge_os_mdelay(20);
5999 	} while(cnt < 5);
6000 	*msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6001 			     &bar0->xmsi_data));
6002 	*msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6003 			     &bar0->xmsi_address);
6004 }
6005 
6006 /**
6007  * xge_hal_channel_msix_set - Associate MSI-X with a channel.
6008  * @channelh: HAL channel handle.
6009  * @msix_idx: index that corresponds to a particular (@msix_value,
6010  *            @msix_address) entry in the MSI-X table.
6011  *
6012  * This API associates a given channel (either Ring or FIFO) with the
6013  * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
6014  * to indicate this association.
6015  */
6016 xge_hal_status_e
6017 xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
6018 {
6019 	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6020 	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6021 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6022 	u64 val64;
6023 	u16 msi_control_reg;
6024 
6025 	 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
		 /* Currently the Ring to RTI mapping is one-to-one. */
6027 		int ring = channel->post_qid;
6028 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6029 			&bar0->rx_mat);
6030 		val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
6031 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6032 			&bar0->rx_mat);
6033 		__hal_device_rti_set(ring, channel);
6034 	 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
6035 		int fifo = channel->post_qid;
6036 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6037 			&bar0->tx_mat[0]);
6038 		val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
6039 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6040 			&bar0->tx_mat[0]);
6041 		__hal_device_tti_set(fifo, channel);
6042 	}
6043 	 channel->msix_idx = msix_idx;
6044 	__hal_set_xmsi_vals(hldev, &channel->msix_data,
6045 			    &channel->msix_address,
6046 			    channel->msix_idx);
6047 
6048 	/*
6049 	* To enable MSI-X, MSI also needs to be enabled, due to a bug
6050 	* in the herc NIC. (Temp change, needs to be removed later)
6051 	*/
6052 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
6053 		xge_offsetof(xge_hal_pci_config_le_t, msi_control), &msi_control_reg);
6054 
6055 	msi_control_reg |= 0x1; /* Enable MSI */
6056 
6057 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
6058 			xge_offsetof(xge_hal_pci_config_le_t, msi_control), msi_control_reg);
6059 
6060 
6061 	/* Enable the MSI-X interrupt */
6062 	{
6063 		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6064 			&bar0->xmsi_mask_reg);
6065 		val64 &= ~(1LL << ( 63 - msix_idx ));
6066 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6067 			&bar0->xmsi_mask_reg);
6068 	}
6069 
6070 	 return XGE_HAL_OK;
6071 }
6072 #endif
6073 
6074 #if defined(XGE_HAL_CONFIG_LRO)
6075 /**
6076  * xge_hal_lro_terminate - Terminate lro resources.
6077  * @lro_scale: Amount of  lro memory.
6078  * @hldev: Hal device structure.
6079  *
6080  */
6081 void
6082 xge_hal_lro_terminate(u32 lro_scale,
6083 	            xge_hal_device_t *hldev)
6084 {
6085 }
6086 
6087 /**
6088  * xge_hal_lro_init - Initiate lro resources.
6089  * @lro_scale: Amount of  lro memory.
6090  * @hldev: Hal device structure.
 * Note: for the time being only one LRO session is used per device.
 * Later on the size will be increased.
6093  */
6094 xge_hal_status_e
6095 xge_hal_lro_init(u32 lro_scale,
6096 	       xge_hal_device_t *hldev)
6097 {
6098 	xge_os_memzero(hldev->lro_pool,
6099 		sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
6100 
6101 	if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
6102 		hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
6103 
6104 	if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
6105 		hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
6106 
6107 	hldev->lro_next_idx = 0;
6108 	hldev->lro_recent = NULL;
6109 
6110 	return XGE_HAL_OK;
6111 }
6112 #endif
6113 
6114 
6115 /**
6116  * xge_hal_device_poll - HAL device "polling" entry point.
6117  * @devh: HAL device.
6118  *
6119  * HAL "polling" entry point. Note that this is part of HAL public API.
6120  * Upper-Layer driver _must_ periodically poll HAL via
6121  * xge_hal_device_poll().
6122  *
6123  * HAL uses caller's execution context to serially process accumulated
6124  * slow-path events, such as link state changes and hardware error
6125  * indications.
6126  *
 * The rate of polling could be somewhere between 500us and 10ms,
 * depending on requirements (e.g., the requirement to support fail-over
 * could mean that a 500us or even 100us polling interval needs to be used).
6130  *
6131  * The need and motivation for external polling includes
6132  *
6133  *   - remove the error-checking "burden" from the HAL interrupt handler
6134  *     (see xge_hal_device_handle_irq());
6135  *
6136  *   - remove the potential source of portability issues by _not_
6137  *     implementing separate polling thread within HAL itself.
6138  *
6139  * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
6140  * Usage: See ex_slow_path{}.
6141  */
6142 void
6143 xge_hal_device_poll(xge_hal_device_h devh)
6144 {
6145 	unsigned char item_buf[sizeof(xge_queue_item_t) +
6146 				XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
6147 	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
6148 	xge_queue_status_e qstatus;
6149 	xge_hal_status_e hstatus;
6150 	int i = 0;
6151 	int queue_has_critical_event = 0;
6152 	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6153 
6154 _again:
6155 	if (!hldev->is_initialized ||
6156 	    hldev->terminating ||
6157 	    hldev->magic != XGE_HAL_MAGIC)
6158 		return;
6159 
6160 	if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
6161 	{
6162 		/*
6163 	 	 * Wait for an Hour
6164 	 	 */
6165 		hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
6166 	} else {
6167 		/*
		 * Log error messages for excess temperature,
		 * bias current and laser output for three cycles
6170 		 */
6171 		__hal_updt_stats_xpak(hldev);
6172 		hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
6173 	}
6174 
6175 	if (!queue_has_critical_event)
6176 	        queue_has_critical_event =
6177 			__queue_get_reset_critical(hldev->queueh);
6178 
6179 	hldev->in_poll = 1;
6180 	while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
6181 
6182 		qstatus = xge_queue_consume(hldev->queueh,
6183 				    XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
6184 				    item);
6185 		if (qstatus == XGE_QUEUE_IS_EMPTY)
6186 			break;
6187 
6188 		xge_debug_queue(XGE_TRACE,
6189 			 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
6190 			 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
6191 			 (u64)(ulong_t)item->context);
6192 
6193 		if (!hldev->is_initialized ||
6194 		    hldev->magic != XGE_HAL_MAGIC) {
6195 			hldev->in_poll = 0;
6196 			return;
6197 		}
6198 
6199 		switch (item->event_type) {
6200 		case XGE_HAL_EVENT_LINK_IS_UP: {
6201 			if (!queue_has_critical_event &&
6202 			    g_xge_hal_driver->uld_callbacks.link_up) {
6203 				g_xge_hal_driver->uld_callbacks.link_up(
6204 					hldev->upper_layer_info);
6205 				hldev->link_state = XGE_HAL_LINK_UP;
6206 			}
6207 		} break;
6208 		case XGE_HAL_EVENT_LINK_IS_DOWN: {
6209 			if (!queue_has_critical_event &&
6210 			    g_xge_hal_driver->uld_callbacks.link_down) {
6211 				g_xge_hal_driver->uld_callbacks.link_down(
6212 					hldev->upper_layer_info);
6213 				hldev->link_state = XGE_HAL_LINK_DOWN;
6214 			}
6215 		} break;
6216 		case XGE_HAL_EVENT_SERR:
6217 		case XGE_HAL_EVENT_ECCERR:
6218 		case XGE_HAL_EVENT_PARITYERR:
6219 		case XGE_HAL_EVENT_TARGETABORT:
6220 		case XGE_HAL_EVENT_SLOT_FREEZE: {
6221 			void *item_data = xge_queue_item_data(item);
6222 			xge_hal_event_e event_type = item->event_type;
6223 			u64 val64 = *((u64*)item_data);
6224 
6225 			if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
6226 				if (xge_hal_device_is_slot_freeze(hldev))
6227 					event_type = XGE_HAL_EVENT_SLOT_FREEZE;
6228 			if (g_xge_hal_driver->uld_callbacks.crit_err) {
6229 			    g_xge_hal_driver->uld_callbacks.crit_err(
6230 					hldev->upper_layer_info,
6231 					event_type,
6232 					val64);
6233 				/* handle one critical event per poll cycle */
6234 				hldev->in_poll = 0;
6235 				return;
6236 			}
6237 		} break;
6238 		default: {
6239 			xge_debug_queue(XGE_TRACE,
6240 				"got non-HAL event %d",
6241 				item->event_type);
6242 		} break;
6243 		}
6244 
6245 		/* broadcast this event */
6246 		if (g_xge_hal_driver->uld_callbacks.event)
6247 			g_xge_hal_driver->uld_callbacks.event(item);
6248 	}
6249 
6250 	if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
6251 		if (g_xge_hal_driver->uld_callbacks.before_device_poll(
6252 					     hldev) != 0) {
6253 			hldev->in_poll = 0;
6254 			return;
6255 		}
6256 	}
6257 
6258 	hstatus = __hal_device_poll(hldev);
6259 	if (g_xge_hal_driver->uld_callbacks.after_device_poll)
6260 	    g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
6261 
6262 	/*
6263 	 * handle critical error right away:
6264 	 * - walk the device queue again
6265 	 * - drop non-critical events, if any
6266 	 * - look for the 1st critical
6267 	 */
6268 	if (hstatus == XGE_HAL_ERR_CRITICAL) {
6269 	        queue_has_critical_event = 1;
6270 		goto _again;
6271 	}
6272 
6273 	hldev->in_poll = 0;
6274 }
6275 
6276 /**
6277  * xge_hal_rts_rth_init - Set enhanced mode for  RTS hashing.
6278  * @hldev: HAL device handle.
6279  *
6280  * This function is used to set the adapter to enhanced mode.
6281  *
6282  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6283  */
6284 void
6285 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
6286 {
6287 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6288 	u64 val64;
6289 
6290 	/*
6291 	 * Set the receive traffic steering mode from default(classic)
6292 	 * to enhanced.
6293 	 */
6294 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6295 				      &bar0->rts_ctrl);
6296 	val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6297 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6298 			       val64, &bar0->rts_ctrl);
6299 }
6300 
6301 /**
6302  * xge_hal_rts_rth_clr - Clear RTS hashing.
6303  * @hldev: HAL device handle.
6304  *
6305  * This function is used to clear all RTS hashing related stuff.
6306  * It brings the adapter out from enhanced mode to classic mode.
6307  * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
6308  *
6309  * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
6310  */
6311 void
6312 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
6313 {
6314 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6315 	u64 val64;
6316 
6317 	/*
	 * Restore the receive traffic steering mode from enhanced
	 * back to default (classic).
6320 	 */
6321 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6322 				      &bar0->rts_ctrl);
6323 	val64 &=  ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6324 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6325 			       val64, &bar0->rts_ctrl);
6326 	val64 = 0;
6327 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6328 			       &bar0->rts_rth_cfg);
6329 }
6330 
6331 /**
6332  * xge_hal_rts_rth_set - Set/configure RTS hashing.
6333  * @hldev: HAL device handle.
6334  * @def_q: default queue
6335  * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6336  * @bucket_size: no of least significant bits to be used for hashing.
6337  *
6338  * Used to set/configure all RTS hashing related stuff.
6339  * - set the steering mode to enhanced.
6340  * - set hash function i.e algo selection.
6341  * - set the default queue.
6342  *
6343  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
6344  */
6345 void
6346 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
6347 		    u16 bucket_size)
6348 {
6349 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6350 	u64 val64;
6351 
6352 	val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
6353 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6354 			       &bar0->rts_default_q);
6355 
6356 	val64 = hash_type;
6357 	val64 |= XGE_HAL_RTS_RTH_EN;
6358 	val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
6359 	val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
6360 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6361 			       &bar0->rts_rth_cfg);
6362 }
6363 
6364 /**
6365  * xge_hal_rts_rth_start - Start RTS hashing.
6366  * @hldev: HAL device handle.
6367  *
 * Used to start RTS hashing.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_stop().
6371  */
6372 void
6373 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
6374 {
6375 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6376 	u64 val64;
6377 
6378 
6379 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6380 				      &bar0->rts_rth_cfg);
6381 	val64 |= XGE_HAL_RTS_RTH_EN;
6382 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6383 			       &bar0->rts_rth_cfg);
6384 }
6385 
6386 /**
6387  * xge_hal_rts_rth_stop - Stop the RTS hashing.
6388  * @hldev: HAL device handle.
6389  *
 * Used to stop RTS hashing.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start().
6393  */
6394 void
6395 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
6396 {
6397 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6398 	u64 val64;
6399 
6400 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6401 				      &bar0->rts_rth_cfg);
6402 	val64 &=  ~XGE_HAL_RTS_RTH_EN;
6403 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6404 			       &bar0->rts_rth_cfg);
6405 }
6406 
6407 /**
6408  * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
6409  * @hldev: HAL device handle.
6410  * @itable: Pointer to the indirection table
 * @itable_size: number of entries in the indirection table
6412  *
6413  * Used to set/configure indirection table.
6414  * It enables the required no of entries in the IT.
6415  * It adds entries to the IT.
6416  *
6417  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6418  */
6419 xge_hal_status_e
6420 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
6421 {
6422 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void	*)hldev->bar0;
6423 	u64 val64;
6424 	u32 idx;
6425 
6426 	for (idx = 0; idx < itable_size; idx++) {
6427 		val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
6428 			XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
6429 
6430 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6431 				       &bar0->rts_rth_map_mem_data);
6432 
6433 		/* execute */
6434 		val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
6435 			 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
6436 			 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
6437 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6438 				       &bar0->rts_rth_map_mem_ctrl);
6439 
6440 		/* poll until done */
6441 		if (__hal_device_register_poll(hldev,
6442 		       &bar0->rts_rth_map_mem_ctrl, 0,
6443 		       XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
6444 		       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6445 			/* upper layer may require to repeat */
6446 			return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
6447 		}
6448 	}
6449 
6450 	return XGE_HAL_OK;
6451 }
6452 
6453 
6454 /**
6455  * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc.
6456  *
6457  * @hldev: HAL device handle.
 * @KeySize: Number of 64-bit words in the key.
 * @Key: up to 40-byte array of 8-bit values.
6460  * This function configures the 40-byte secret which is used for hash
6461  * calculation.
6462  *
6463  * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6464  */
6465 void
6466 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
6467 {
6468 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
6469 	u64 val64;
6470 	u32 entry, nreg, i;
6471 
6472 	entry = 0;
6473 	nreg = 0;
6474 
6475 	while( KeySize ) {
6476 		val64 = 0;
6477 		for ( i = 0; i < 8 ; i++) {
			/* Prepare 64-bit word for 'nreg' containing 8 key bytes. */
6479 			if (i)
6480 				val64 <<= 8;
6481 			val64 |= Key[entry++];
6482 		}
6483 
6484 		KeySize--;
6485 
6486 		/* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
6487 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6488 				       &bar0->rts_rth_hash_mask[nreg++]);
6489 	}
6490 
6491 	while( nreg < 5 ) {
6492 		/* Clear the rest if key is less than 40 bytes */
6493 		val64 = 0;
6494 		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6495 				       &bar0->rts_rth_hash_mask[nreg++]);
6496 	}
6497 }
6498 
6499 
6500 /**
 * xge_hal_device_is_closed - Check whether the device is closed.
6502  *
6503  * @devh: HAL device handle.
6504  */
6505 int
6506 xge_hal_device_is_closed(xge_hal_device_h devh)
6507 {
6508 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6509 
6510 	if (xge_list_is_empty(&hldev->fifo_channels) &&
6511 	    xge_list_is_empty(&hldev->ring_channels))
6512 		return 1;
6513 
6514 	return 0;
6515 }
6516 
6517 xge_hal_status_e
6518 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
6519 {
6520 	u64 val64;
6521 	int section;
6522 	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
6523 
6524 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6525 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6526 
6527 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
6528 		max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
6529 
6530 	if ( index >= max_addr )
6531 		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
6532 
6533 	/*
6534 	 * Calculate the section value
6535 	 */
6536 	section = index / 32;
6537 
6538         xge_debug_device(XGE_TRACE, "the Section value is %d \n", section);
6539 
6540 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6541 				&bar0->rts_mac_cfg);
6542 	switch(section)
6543 	{
6544 		case 0:
6545 			val64 |=  XGE_HAL_RTS_MAC_SECT0_EN;
6546 			break;
6547 		case 1:
6548 			val64 |=  XGE_HAL_RTS_MAC_SECT1_EN;
6549 			break;
6550 		case 2:
6551 			val64 |=  XGE_HAL_RTS_MAC_SECT2_EN;
6552 			break;
6553 		case 3:
6554 			val64 |=  XGE_HAL_RTS_MAC_SECT3_EN;
6555 			break;
6556 		case 4:
6557 			val64 |=  XGE_HAL_RTS_MAC_SECT4_EN;
6558 			break;
6559 		case 5:
6560 			val64 |=  XGE_HAL_RTS_MAC_SECT5_EN;
6561 			break;
6562 		case 6:
6563 			val64 |=  XGE_HAL_RTS_MAC_SECT6_EN;
6564 			break;
6565 		case 7:
6566 			val64 |=  XGE_HAL_RTS_MAC_SECT7_EN;
6567 			break;
6568 		default:
6569 			xge_debug_device(XGE_ERR, "Invalid Section value %d \n"
6570 					, section);
6571         }
6572 
6573 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6574 				val64, &bar0->rts_mac_cfg);
6575 	return XGE_HAL_OK;
6576 }
6577 
6578 #ifdef XGEHAL_RNIC
6579 
6580 static u8 __hal_device_free_bit[256] = {
6581 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6582 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6583 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6584 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6585 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6586 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6587 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6588 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6589 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
6590 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
6591 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
6592 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
6593 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
6594 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
6595 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
6596 	4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 7, 8 };
6597 
6598 xge_hal_status_e
6599 __hal_device_oid_allocate(xge_hal_rnic_oid_db_t *objdb, u32 *objid)
6600 {
6601 	u32 i;
6602 	u32 fb;
6603 
6604 	if(objid == NULL)
6605 		return XGE_HAL_FAIL;
6606 
6607 	for( i = objdb->id_next_byte; i < sizeof(objdb->id_map); i++ )
6608 	{
6609 		fb = __hal_device_free_bit[objdb->id_map[i]];
6610 
6611 		if(fb < 8){
6612 			*objid = XGE_HAL_RNIC_OID_DB_OID_GET((i*8+fb),
6613 					objdb->id_inst_number);
6614 			objdb->id_next_byte = i;
6615 			objdb->id_map[i] |= (0x80 >> fb);
6616 			return XGE_HAL_OK;
6617 		}
6618 	}
6619 
6620 	objdb->id_inst_number++;
6621 
6622 	for( i = 0; i < objdb->id_next_byte; i++ )
6623 	{
6624 		fb = __hal_device_free_bit[objdb->id_map[i]];
6625 
6626 		if(fb < 8){
6627 			*objid = XGE_HAL_RNIC_OID_DB_OID_GET((i*8+fb),
6628 					objdb->id_inst_number);
6629 			objdb->id_next_byte = i;
6630 			objdb->id_map[i] |= (0x80 >> fb);
6631 			return XGE_HAL_OK;
6632 		}
6633 	}
6634 
6635 	return XGE_HAL_FAIL;
6636 }
6637 
6638 xge_hal_status_e
6639 __hal_device_oid_free(xge_hal_rnic_oid_db_t *objdb, u32 objid)
6640 {
6641 	u32 i;
6642 	u32 fb;
6643 
6644 	i = XGE_HAL_RNIC_OID_DB_SID_GET(objid) / 8;
6645 	fb = XGE_HAL_RNIC_OID_DB_SID_GET(objid) - i * 8;
6646 
6647 	if( i >= sizeof(objdb->id_map) )
6648 		return XGE_HAL_FAIL;
6649 
6650 	objdb->id_map[i] &= ~(0x80 >> fb);
6651 
6652 	return XGE_HAL_OK;
6653 }
6654 
6655 #endif
6656 
6657