xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fflp.c (revision 59ac0c16)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <npi_fflp.h>
29 #include <npi_mac.h>
30 #include <nxge_defs.h>
31 #include <nxge_flow.h>
32 #include <nxge_fflp.h>
33 #include <nxge_impl.h>
34 #include <nxge_fflp_hash.h>
35 #include <nxge_common.h>
36 
37 
38 /*
39  * Function prototypes
40  */
41 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
42 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
43 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
44 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
45 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
46 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
47 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
48 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
49 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
50 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
51 	tcam_entry_t *);
52 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
53 	tcam_entry_t *);
54 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
55 	tcam_entry_t *);
56 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, intptr_t);
57 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, intptr_t);
58 static tcam_location_t nxge_get_tcam_location(p_nxge_t, uint8_t);
59 
60 /*
61  * functions used outside this file
62  */
63 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
64 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
65 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
66 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
67 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
68 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
69 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
70 	uint32_t *, uint16_t *);
71 
72 nxge_status_t
73 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
74 {
75 	tcam_entry_t tcam_rdptr;
76 	uint64_t asc_ram = 0;
77 	npi_handle_t handle;
78 	npi_status_t status;
79 
80 	handle = nxgep->npi_reg_handle;
81 
82 	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
83 	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
84 		(struct tcam_entry *)&tcam_rdptr);
85 	if (status & NPI_FAILURE) {
86 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
87 			" nxge_tcam_dump_entry:"
88 			"  tcam read failed at location %d ", location));
89 		return (NXGE_ERROR);
90 	}
91 	status = npi_fflp_tcam_asc_ram_entry_read(handle,
92 		(tcam_location_t)location, &asc_ram);
93 
94 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
95 		" key:  %llx %llx %llx %llx \n"
96 		" mask: %llx %llx %llx %llx \n"
97 		" ASC RAM %llx \n", location,
98 		tcam_rdptr.key0, tcam_rdptr.key1,
99 		tcam_rdptr.key2, tcam_rdptr.key3,
100 		tcam_rdptr.mask0, tcam_rdptr.mask1,
101 		tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
102 	return (NXGE_OK);
103 }
104 
105 void
106 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
107 {
108 	uint32_t tcam_loc;
109 	int *lptr;
110 	int location;
111 
112 	uint32_t start_location = 0;
113 	uint32_t stop_location = nxgep->classifier.tcam_size;
114 	lptr = (int *)mp->b_rptr;
115 	location = *lptr;
116 
117 	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
118 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
119 			"nxge_tcam_dump: Invalid location %d \n", location));
120 		return;
121 	}
122 	if (location == -1) {
123 		start_location = 0;
124 		stop_location = nxgep->classifier.tcam_size;
125 	} else {
126 		start_location = location;
127 		stop_location = location + 1;
128 	}
129 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
130 		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
131 }
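/*
 * Illustrative only: the caller passes the requested TCAM index in the
 * first int of the mblk; a value of -1 dumps every entry.  A debug
 * sketch, assuming the mblk is already allocated and writable:
 *
 *	*(int *)mp->b_rptr = -1;	(dump the whole TCAM)
 *	nxge_get_tcam(nxgep, mp);
 */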
132 
133 /*
134  * nxge_fflp_vlan_tbl_clear_all
135  * Clears (invalidates) all of the VLAN RDC table entries.
136  * INPUT
137  * nxge    soft state data structure
138  * Return
139  *      NXGE_OK
140  *      NXGE_ERROR
141  *
142  */
143 
144 static nxge_status_t
145 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
146 {
147 	vlan_id_t vlan_id;
148 	npi_handle_t handle;
149 	npi_status_t rs = NPI_SUCCESS;
150 	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
151 
152 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
153 	handle = nxgep->npi_reg_handle;
154 	for (vlan_id = start; vlan_id < stop; vlan_id++) {
155 		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
156 		if (rs != NPI_SUCCESS) {
157 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
158 				"VLAN Table invalidate failed for vlan id %d ",
159 				vlan_id));
160 			return (NXGE_ERROR | rs);
161 		}
162 	}
163 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
164 	return (NXGE_OK);
165 }
166 
167 /*
168  * The following functions are used by other modules to initialize
169  * the FFLP module.
170  * They are the basic API used to set up the FFLP
171  * blocks (TCAM, FCRAM, etc.).
172  *
173  * The TCAM search feature is disabled by default.
174  */
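/*
 * Illustrative only: a minimal sketch of how these routines are
 * sequenced by nxge_fflp_hw_reset() further below (error handling
 * omitted here; the real path checks every return value):
 *
 *	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type))
 *		(void) nxge_fflp_fcram_init(nxgep);
 *	(void) nxge_fflp_tcam_init(nxgep);
 *	(void) nxge_fflp_tcam_invalidate_all(nxgep);
 *	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type))
 *		(void) nxge_fflp_fcram_invalidate_all(nxgep);
 *	(void) nxge_fflp_vlan_tbl_clear_all(nxgep);
 */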
175 
176 static nxge_status_t
177 nxge_fflp_tcam_init(p_nxge_t nxgep)
178 {
179 	uint8_t access_ratio;
180 	tcam_class_t class;
181 	npi_status_t rs = NPI_SUCCESS;
182 	npi_handle_t handle;
183 
184 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
185 	handle = nxgep->npi_reg_handle;
186 
187 	rs = npi_fflp_cfg_tcam_disable(handle);
188 	if (rs != NPI_SUCCESS) {
189 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
190 		return (NXGE_ERROR | rs);
191 	}
192 
193 	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
194 	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
195 	if (rs != NPI_SUCCESS) {
196 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
197 				"failed TCAM Access cfg\n"));
198 		return (NXGE_ERROR | rs);
199 	}
200 
201 	/* disable the configurable classes: */
202 	/* first, the configurable ethernet classes */
203 	for (class = TCAM_CLASS_ETYPE_1;
204 		class <= TCAM_CLASS_ETYPE_2; class++) {
205 		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
206 		if (rs != NPI_SUCCESS) {
207 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
208 				"TCAM USR Ether Class config failed."));
209 			return (NXGE_ERROR | rs);
210 		}
211 	}
212 
213 	/* then the configurable IP classes */
214 	for (class = TCAM_CLASS_IP_USER_4;
215 		class <= TCAM_CLASS_IP_USER_7; class++) {
216 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
217 		if (rs != NPI_SUCCESS) {
218 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
219 				"TCAM USR IP Class config failed."));
220 			return (NXGE_ERROR | rs);
221 		}
222 	}
223 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
224 	return (NXGE_OK);
225 }
226 
227 /*
228  * nxge_fflp_tcam_invalidate_all
229  * invalidates all the tcam entries.
230  * INPUT
231  * nxge    soft state data structure
232  * Return
233  *      NXGE_OK
234  *      NXGE_ERROR
235  *
236  */
237 
238 
239 static nxge_status_t
240 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
241 {
242 	uint16_t location;
243 	npi_status_t rs = NPI_SUCCESS;
244 	npi_handle_t handle;
245 	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
246 	p_nxge_hw_list_t hw_p;
247 
248 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
249 		"==> nxge_fflp_tcam_invalidate_all"));
250 	handle = nxgep->npi_reg_handle;
251 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
252 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
253 			" nxge_fflp_tcam_invalidate_all:"
254 			" common hardware not set (niu_type 0x%x)", nxgep->niu_type));
255 		return (NXGE_ERROR);
256 	}
257 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
258 	for (location = start; location < stop; location++) {
259 		rs = npi_fflp_tcam_entry_invalidate(handle, location);
260 		if (rs != NPI_SUCCESS) {
261 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
262 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
263 				"TCAM invalidate failed at loc %d ", location));
264 			return (NXGE_ERROR | rs);
265 		}
266 	}
267 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
268 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
269 			"<== nxge_fflp_tcam_invalidate_all"));
270 	return (NXGE_OK);
271 }
272 
273 /*
274  * nxge_fflp_fcram_invalidate_all
275  * Invalidates all of the FCRAM entries.
276  * INPUT
277  * nxge    soft state data structure
278  * Return
279  *      NXGE_OK
280  *      NXGE_ERROR
281  *
282  */
283 
284 static nxge_status_t
285 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
286 {
287 	npi_handle_t handle;
288 	npi_status_t rs = NPI_SUCCESS;
289 	part_id_t pid = 0;
290 	uint8_t base_mask, base_reloc;
291 	fcram_entry_t fc;
292 	uint32_t location;
293 	uint32_t increment, last_location;
294 
295 	/*
296 	 * (1) configure and enable partition 0 with no relocation
297 	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
298 	 * (3) Invalidate these cells by clearing the valid bit in
299 	 * the subareas 0 and 4
300 	 * (4) disable the partition
301 	 *
302 	 */
303 
304 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
305 
306 	base_mask = base_reloc = 0x0;
307 	handle = nxgep->npi_reg_handle;
308 	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
309 
310 	if (rs != NPI_SUCCESS) {
311 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
312 		return (NXGE_ERROR | rs);
313 	}
314 	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
315 
316 	if (rs != NPI_SUCCESS) {
317 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
318 			"failed partition disable\n"));
319 		return (NXGE_ERROR | rs);
320 	}
321 	fc.dreg[0].value = 0;
322 	fc.hash_hdr_valid = 0;
323 	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
324 	increment = sizeof (hash_ipv4_t);
325 	last_location = FCRAM_SIZE * 0x40;
326 
327 	for (location = 0; location < last_location; location += increment) {
328 		rs = npi_fflp_fcram_subarea_write(handle, pid,
329 			location,
330 			fc.value[0]);
331 		if (rs != NPI_SUCCESS) {
332 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
333 					"failed write "
334 					"at location %x ",
335 					location));
336 			return (NXGE_ERROR | rs);
337 		}
338 	}
339 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
340 	return (NXGE_OK);
341 }
342 
343 static nxge_status_t
344 nxge_fflp_fcram_init(p_nxge_t nxgep)
345 {
346 	fflp_fcram_output_drive_t strength;
347 	fflp_fcram_qs_t qs;
348 	npi_status_t rs = NPI_SUCCESS;
349 	uint8_t access_ratio;
350 	int partition;
351 	npi_handle_t handle;
352 	uint32_t min_time, max_time, sys_time;
353 
354 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
355 
356 	/*
357 	 * Use the recommended default FCRAM refresh timing values.
358 	 */
359 	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
360 	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
361 	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
362 
363 	handle = nxgep->npi_reg_handle;
364 	strength = FCRAM_OUTDR_NORMAL;
365 	qs = FCRAM_QS_MODE_QS;
366 	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
367 	if (rs != NPI_SUCCESS) {
368 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
369 		return (NXGE_ERROR | rs);
370 	}
371 
372 	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
373 	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
374 	if (rs != NPI_SUCCESS) {
375 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio "
376 			"configuration \n"));
377 		return (NXGE_ERROR | rs);
378 	}
379 	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
380 		max_time, sys_time);
381 	if (rs != NPI_SUCCESS) {
382 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
383 			"failed FCRAM refresh cfg"));
384 		return (NXGE_ERROR);
385 	}
386 
387 	/* disable all the partitions until explicitly enabled */
388 	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
389 		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
390 		if (rs != NPI_SUCCESS) {
391 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
392 				"failed FCRAM partition"
393 				" disable for partition %d ", partition));
394 			return (NXGE_ERROR | rs);
395 		}
396 	}
397 
398 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
399 	return (NXGE_OK);
400 }
401 
402 nxge_status_t
403 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
404 {
405 	npi_status_t rs = NPI_SUCCESS;
406 	hostinfo_t mac_rdc;
407 	npi_handle_t handle;
408 	p_nxge_class_pt_cfg_t p_class_cfgp;
409 
410 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
411 	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
412 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
413 			" nxge_logical_mac_assign_rdc_table"
414 			" unconfigured alt MAC addr %d ", alt_mac));
415 		return (NXGE_ERROR);
416 	}
417 	handle = nxgep->npi_reg_handle;
418 	mac_rdc.value = 0;
419 	mac_rdc.bits.w0.rdc_tbl_num =
420 		p_class_cfgp->mac_host_info[alt_mac].rdctbl;
421 	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
422 
423 	rs = npi_mac_hostinfo_entry(handle, OP_SET,
424 		nxgep->function_num, alt_mac, &mac_rdc);
425 
426 	if (rs != NPI_SUCCESS) {
427 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
428 			"failed Assign RDC table"));
429 		return (NXGE_ERROR | rs);
430 	}
431 	return (NXGE_OK);
432 }
433 
434 nxge_status_t
435 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
436 {
437 	npi_status_t rs = NPI_SUCCESS;
438 	hostinfo_t mac_rdc;
439 	npi_handle_t handle;
440 
441 	handle = nxgep->npi_reg_handle;
442 	mac_rdc.value = 0;
443 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
444 	mac_rdc.bits.w0.mac_pref = 1;
445 	switch (nxgep->function_num) {
446 	case 0:
447 	case 1:
448 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
449 			nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY,
450 			&mac_rdc);
451 		break;
452 	case 2:
453 	case 3:
454 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
455 			nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY,
456 			&mac_rdc);
457 		break;
458 	default:
459 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
460 			"failed Assign RDC table (invalid function #)"));
461 		return (NXGE_ERROR);
462 	}
463 
464 	if (rs != NPI_SUCCESS) {
465 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
466 				"failed Assign RDC table"));
467 		return (NXGE_ERROR | rs);
468 	}
469 	return (NXGE_OK);
470 }
471 
472 /*
473  * Initialize hostinfo registers for alternate MAC addresses and
474  * multicast MAC address.
475  */
476 nxge_status_t
477 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
478 {
479 	npi_status_t rs = NPI_SUCCESS;
480 	hostinfo_t mac_rdc;
481 	npi_handle_t handle;
482 	int i;
483 
484 	handle = nxgep->npi_reg_handle;
485 	mac_rdc.value = 0;
486 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
487 	mac_rdc.bits.w0.mac_pref = 1;
488 	switch (nxgep->function_num) {
489 	case 0:
490 	case 1:
491 		/*
492 		 * Tests indicate that it is OK not to re-initialize the
493 		 * hostinfo registers for the XMAC's alternate MAC
494 		 * addresses. But that is necessary for BMAC (case 2
495 		 * and case 3 below)
496 		 */
497 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
498 			nxgep->function_num,
499 			XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
500 		break;
501 	case 2:
502 	case 3:
503 		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
504 			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
505 			nxgep->function_num, i, &mac_rdc);
506 
507 		rs |= npi_mac_hostinfo_entry(handle, OP_SET,
508 			nxgep->function_num,
509 			BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
510 		break;
511 	default:
512 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
513 			"failed Assign RDC table (invalid function #)"));
514 		return (NXGE_ERROR);
515 	}
516 
517 	if (rs != NPI_SUCCESS) {
518 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
519 			"failed Assign RDC table"));
520 		return (NXGE_ERROR | rs);
521 	}
522 	return (NXGE_OK);
523 }
524 
525 nxge_status_t
526 nxge_fflp_init_hostinfo(p_nxge_t nxgep)
527 {
528 	nxge_status_t status = NXGE_OK;
529 
530 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
531 	status |= nxge_main_mac_assign_rdc_table(nxgep);
532 	return (status);
533 }
534 
535 nxge_status_t
536 nxge_fflp_hw_reset(p_nxge_t nxgep)
537 {
538 	npi_handle_t handle;
539 	npi_status_t rs = NPI_SUCCESS;
540 	nxge_status_t status = NXGE_OK;
541 
542 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
543 
544 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
545 		status = nxge_fflp_fcram_init(nxgep);
546 		if (status != NXGE_OK) {
547 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
548 				" failed FCRAM init. "));
549 			return (status);
550 		}
551 	}
552 
553 	status = nxge_fflp_tcam_init(nxgep);
554 	if (status != NXGE_OK) {
555 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
556 			"failed TCAM init."));
557 		return (status);
558 	}
559 
560 	handle = nxgep->npi_reg_handle;
561 	rs = npi_fflp_cfg_llcsnap_enable(handle);
562 	if (rs != NPI_SUCCESS) {
563 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
564 			"failed LLCSNAP enable. "));
565 		return (NXGE_ERROR | rs);
566 	}
567 
568 	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
569 	if (rs != NPI_SUCCESS) {
570 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
571 			"failed CAM Error Check disable. "));
572 		return (NXGE_ERROR | rs);
573 	}
574 
575 	/* init the hash generators */
576 	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
577 	if (rs != NPI_SUCCESS) {
578 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
579 			"failed H1 Poly Init. "));
580 		return (NXGE_ERROR | rs);
581 	}
582 
583 	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
584 	if (rs != NPI_SUCCESS) {
585 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
586 			"failed H2 Poly Init. "));
587 		return (NXGE_ERROR | rs);
588 	}
589 
590 	/* invalidate TCAM entries */
591 	status = nxge_fflp_tcam_invalidate_all(nxgep);
592 	if (status != NXGE_OK) {
593 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
594 			"failed TCAM Entry Invalidate. "));
595 		return (status);
596 	}
597 
598 	/* invalidate FCRAM entries */
599 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
600 		status = nxge_fflp_fcram_invalidate_all(nxgep);
601 		if (status != NXGE_OK) {
602 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
603 					"failed FCRAM Entry Invalidate."));
604 			return (status);
605 		}
606 	}
607 
608 	/* invalidate VLAN RDC tables */
609 	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
610 	if (status != NXGE_OK) {
611 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
612 			"failed VLAN Table Invalidate. "));
613 		return (status);
614 	}
615 	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
616 
617 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
618 	return (NXGE_OK);
619 }
620 
621 nxge_status_t
622 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
623 	uint32_t class_config)
624 {
625 	flow_key_cfg_t fcfg;
626 	npi_handle_t handle;
627 	npi_status_t rs = NPI_SUCCESS;
628 
629 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
630 	handle = nxgep->npi_reg_handle;
631 	bzero(&fcfg, sizeof (flow_key_cfg_t));
632 
633 	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
634 		fcfg.use_proto = 1;
635 	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
636 		fcfg.use_dport = 1;
637 	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
638 		fcfg.use_sport = 1;
639 	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
640 		fcfg.use_daddr = 1;
641 	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
642 		fcfg.use_saddr = 1;
643 	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
644 		fcfg.use_vlan = 1;
645 	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
646 		fcfg.use_l2da = 1;
647 	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
648 		fcfg.use_portnum = 1;
649 	fcfg.ip_opts_exist = 0;
650 
651 	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
652 	if (rs & NPI_FFLP_ERROR) {
653 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
654 			" opt %x for class %d failed ",
655 			class_config, l3_class));
656 		return (NXGE_ERROR | rs);
657 	}
658 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
659 	return (NXGE_OK);
660 }
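/*
 * Illustrative only: a caller selects which header fields feed the
 * flow key (and hence the receive hash) with the NXGE_CLASS_FLOW_USE_*
 * flags decoded above.  For example, hashing TCP/IPv4 flows on the
 * full 5-tuple might look like:
 *
 *	uint32_t cfg = NXGE_CLASS_FLOW_USE_PROTO |
 *	    NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_IPDST |
 *	    NXGE_CLASS_FLOW_USE_SRC_PORT | NXGE_CLASS_FLOW_USE_DST_PORT;
 *	(void) nxge_cfg_ip_cls_flow_key(nxgep, TCAM_CLASS_TCP_IPV4, cfg);
 */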
661 
662 nxge_status_t
663 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
664 	uint32_t *class_config)
665 {
666 	flow_key_cfg_t fcfg;
667 	npi_handle_t handle;
668 	npi_status_t rs = NPI_SUCCESS;
669 	uint32_t ccfg = 0;
670 
671 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
672 	handle = nxgep->npi_reg_handle;
673 	bzero(&fcfg, sizeof (flow_key_cfg_t));
674 
675 	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
676 	if (rs & NPI_FFLP_ERROR) {
677 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
678 				" nxge_cfg_ip_cls_flow_key_get"
679 				" for class %d failed ", l3_class));
680 		return (NXGE_ERROR | rs);
681 	}
682 
683 	if (fcfg.use_proto)
684 		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
685 	if (fcfg.use_dport)
686 		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
687 	if (fcfg.use_sport)
688 		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
689 	if (fcfg.use_daddr)
690 		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
691 	if (fcfg.use_saddr)
692 		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
693 	if (fcfg.use_vlan)
694 		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
695 	if (fcfg.use_l2da)
696 		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
697 	if (fcfg.use_portnum)
698 		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
699 
700 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
701 		" nxge_cfg_ip_cls_flow_key_get %x", ccfg));
702 	*class_config = ccfg;
703 
704 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
705 		" <== nxge_cfg_ip_cls_flow_key_get"));
706 	return (NXGE_OK);
707 }
708 
709 static nxge_status_t
710 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
711 	uint32_t *class_config)
712 {
713 	npi_status_t rs = NPI_SUCCESS;
714 	tcam_key_cfg_t cfg;
715 	npi_handle_t handle;
716 	uint32_t ccfg = 0;
717 
718 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class_get"));
719 
720 	bzero(&cfg, sizeof (tcam_key_cfg_t));
721 	handle = nxgep->npi_reg_handle;
722 
723 	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
724 	if (rs & NPI_FFLP_ERROR) {
725 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
726 			" nxge_cfg_tcam_ip_class_get"
727 			" for class %d failed ", class));
728 		return (NXGE_ERROR | rs);
729 	}
730 	if (cfg.discard)
731 		ccfg |= NXGE_CLASS_DISCARD;
732 	if (cfg.lookup_enable)
733 		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
734 	if (cfg.use_ip_daddr)
735 		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
736 	*class_config = ccfg;
737 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
738 			" <== nxge_cfg_tcam_ip_class_get %x", ccfg));
739 	return (NXGE_OK);
740 }
741 
742 static nxge_status_t
743 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
744 	uint32_t class_config)
745 {
746 	npi_status_t rs = NPI_SUCCESS;
747 	tcam_key_cfg_t cfg;
748 	npi_handle_t handle;
749 	p_nxge_class_pt_cfg_t p_class_cfgp;
750 
751 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
752 
753 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
754 	p_class_cfgp->class_cfg[class] = class_config;
755 
756 	bzero(&cfg, sizeof (tcam_key_cfg_t));
757 	handle = nxgep->npi_reg_handle;
758 	cfg.discard = 0;
759 	cfg.lookup_enable = 0;
760 	cfg.use_ip_daddr = 0;
761 	if (class_config & NXGE_CLASS_DISCARD)
762 		cfg.discard = 1;
763 	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
764 		cfg.lookup_enable = 1;
765 	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
766 		cfg.use_ip_daddr = 1;
767 
768 	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
769 	if (rs & NPI_FFLP_ERROR) {
770 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
771 			" opt %x for class %d failed ",
772 			class_config, class));
773 		return (NXGE_ERROR | rs);
774 	}
775 	return (NXGE_OK);
776 }
777 
778 nxge_status_t
779 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
780 {
781 	npi_status_t rs = NPI_SUCCESS;
782 	npi_handle_t handle;
783 	p_nxge_class_pt_cfg_t p_class_cfgp;
784 
785 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1"));
786 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
787 	p_class_cfgp->init_h1 = h1;
788 	handle = nxgep->npi_reg_handle;
789 	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
790 	if (rs & NPI_FFLP_ERROR) {
791 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
792 			" nxge_fflp_init_h1 %x failed ", h1));
793 		return (NXGE_ERROR | rs);
794 	}
795 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1"));
796 	return (NXGE_OK);
797 }
798 
799 nxge_status_t
800 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
801 {
802 	npi_status_t rs = NPI_SUCCESS;
803 	npi_handle_t handle;
804 	p_nxge_class_pt_cfg_t p_class_cfgp;
805 
806 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2"));
807 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
808 	p_class_cfgp->init_h2 = h2;
809 
810 	handle = nxgep->npi_reg_handle;
811 	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
812 	if (rs & NPI_FFLP_ERROR) {
813 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
814 			" nxge_fflp_init_h2 %x failed ", h2));
815 		return (NXGE_ERROR | rs);
816 	}
817 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2"));
818 	return (NXGE_OK);
819 }
820 
821 nxge_status_t
822 nxge_classify_init_sw(p_nxge_t nxgep)
823 {
824 	int alloc_size;
825 	nxge_classify_t *classify_ptr;
826 
827 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
828 	classify_ptr = &nxgep->classifier;
829 
830 	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
831 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
832 			"nxge_classify_init_sw already init"));
833 		return (NXGE_OK);
834 	}
835 	/* Init SW structures */
836 	classify_ptr->tcam_size = TCAM_NIU_TCAM_MAX_ENTRY;
837 
838 	/* init data structures, based on HW type */
839 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
840 		classify_ptr->tcam_size = TCAM_NXGE_TCAM_MAX_ENTRY;
841 		/*
842 		 * check if fcram based classification is required and init the
843 		 * flow storage
844 		 */
845 	}
846 	alloc_size = sizeof (tcam_flow_spec_t) * classify_ptr->tcam_size;
847 	classify_ptr->tcam_entries = KMEM_ZALLOC(alloc_size, KM_SLEEP);
848 
849 	/* Init defaults */
850 	/*
851 	 * add hacks required for HW shortcomings for example, code to handle
852 	 * fragmented packets
853 	 */
854 	nxge_init_h1_table();
855 	nxge_crc_ccitt_init();
856 	nxgep->classifier.tcam_location = nxgep->function_num;
857 	nxgep->classifier.fragment_bug = 1;
858 	classify_ptr->state |= NXGE_FFLP_SW_INIT;
859 
860 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
861 	return (NXGE_OK);
862 }
863 
864 nxge_status_t
865 nxge_classify_exit_sw(p_nxge_t nxgep)
866 {
867 	int alloc_size;
868 	nxge_classify_t *classify_ptr;
869 	int fsize;
870 
871 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
872 	classify_ptr = &nxgep->classifier;
873 
874 	fsize = sizeof (tcam_flow_spec_t);
875 	if (classify_ptr->tcam_entries) {
876 		alloc_size = fsize * classify_ptr->tcam_size;
877 		KMEM_FREE((void *) classify_ptr->tcam_entries, alloc_size);
878 	}
879 	nxgep->classifier.state = 0;
880 
881 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
882 	return (NXGE_OK);
883 }
884 
885 /*
886  * Figures out the location where the TCAM entry is
887  * to be inserted.
888  *
889  * The current implementation is just a placeholder: it simply
890  * returns the next TCAM location in round-robin order.
891  * A real location-selection algorithm would consider the
892  * priority, partition, etc. before deciding where to insert
893  * the entry.
894  *
895  */
896 
897 /* ARGSUSED */
898 static tcam_location_t
899 nxge_get_tcam_location(p_nxge_t nxgep, uint8_t class)
900 {
901 	tcam_location_t location;
902 
903 	location = nxgep->classifier.tcam_location;
904 	nxgep->classifier.tcam_location = (location + nxgep->nports) %
905 		nxgep->classifier.tcam_size;
906 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
907 		"nxge_get_tcam_location: location %d next %d \n",
908 		location, nxgep->classifier.tcam_location));
909 	return (location);
910 }
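/*
 * Illustrative only: with the placeholder policy above, a 4-port
 * device (nports == 4) on function 0 hands out locations
 * 0, 4, 8, 12, ... modulo the TCAM size, so the functions interleave
 * without colliding.  This is an example of the current behavior,
 * not a contract.
 */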
911 
912 /*
913  * Figures out the RDC Group for the entry
914  *
915  * The current implementation is just a placeholder: it returns
916  * the port's starting RDC group ID.
917  * A real algorithm would consider the partition, etc. before
918  * deciding which RDC group to use.
919  *
920  */
921 
922 /* ARGSUSED */
923 static uint8_t
924 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
925 {
926 	int use_port_rdc_grp = 0;
927 	uint8_t rdc_grp = 0;
928 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
929 	p_nxge_hw_pt_cfg_t p_cfgp;
930 	p_nxge_rdc_grp_t rdc_grp_p;
931 
932 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
933 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
934 	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
935 	rdc_grp = p_cfgp->start_rdc_grpid;
936 
937 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
938 		"nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
939 		cookie, rdc_grp, rdc_grp_p));
940 	return (rdc_grp);
941 }
942 
943 /* ARGSUSED */
944 static uint8_t
945 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, intptr_t cookie)
946 {
947 	return ((uint8_t)cookie);
948 }
949 
950 /* ARGSUSED */
951 static void
952 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
953 	tcam_entry_t *tcam_ptr)
954 {
955 	udpip4_spec_t *fspec_key;
956 	udpip4_spec_t *fspec_mask;
957 
958 	fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
959 	fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
960 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
961 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
962 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
963 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
964 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
965 		fspec_key->pdst, fspec_key->psrc);
966 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
967 		fspec_mask->pdst, fspec_mask->psrc);
968 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
969 		tcam_ptr->ip4_class_mask,
970 		TCAM_CLASS_UDP_IPV4);
971 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
972 		tcam_ptr->ip4_proto_mask,
973 		IPPROTO_UDP);
974 }
975 
976 static void
977 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
978 	tcam_entry_t *tcam_ptr)
979 {
980 	udpip6_spec_t *fspec_key;
981 	udpip6_spec_t *fspec_mask;
982 	p_nxge_class_pt_cfg_t p_class_cfgp;
983 
984 	fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
985 	fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
986 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
987 	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
988 			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
989 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
990 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
991 	} else {
992 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
993 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
994 	}
995 
996 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
997 		tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
998 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
999 		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
1000 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1001 		fspec_key->pdst, fspec_key->psrc);
1002 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1003 		fspec_mask->pdst, fspec_mask->psrc);
1004 }
1005 
1006 /* ARGSUSED */
1007 static void
1008 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1009 	tcam_entry_t *tcam_ptr)
1010 {
1011 	tcpip4_spec_t *fspec_key;
1012 	tcpip4_spec_t *fspec_mask;
1013 
1014 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1015 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1016 
1017 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1018 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1019 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1020 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1021 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1022 		fspec_key->pdst, fspec_key->psrc);
1023 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1024 		fspec_mask->pdst, fspec_mask->psrc);
1025 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1026 		tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
1027 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1028 		tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
1029 }
1030 
1031 /* ARGSUSED */
1032 static void
1033 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1034 	tcam_entry_t *tcam_ptr)
1035 {
1036 	tcpip4_spec_t *fspec_key;
1037 	tcpip4_spec_t *fspec_mask;
1038 
1039 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1040 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1041 
1042 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1043 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1044 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1045 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1046 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1047 		tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
1048 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1049 		tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
1050 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1051 		fspec_key->pdst, fspec_key->psrc);
1052 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1053 		fspec_mask->pdst, fspec_mask->psrc);
1054 }
1055 
1056 static void
1057 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1058 	tcam_entry_t *tcam_ptr)
1059 {
1060 	tcpip6_spec_t *fspec_key;
1061 	tcpip6_spec_t *fspec_mask;
1062 	p_nxge_class_pt_cfg_t p_class_cfgp;
1063 
1064 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1065 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1066 
1067 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1068 	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
1069 			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1070 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1071 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1072 	} else {
1073 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1074 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1075 	}
1076 
1077 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1078 		tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
1079 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1080 		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
1081 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1082 		fspec_key->pdst, fspec_key->psrc);
1083 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1084 		fspec_mask->pdst, fspec_mask->psrc);
1085 }
1086 
1087 static void
1088 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1089 	tcam_entry_t *tcam_ptr)
1090 {
1091 	tcpip6_spec_t *fspec_key;
1092 	tcpip6_spec_t *fspec_mask;
1093 	p_nxge_class_pt_cfg_t p_class_cfgp;
1094 
1095 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1096 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1097 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1098 
1099 	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
1100 			NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1101 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1102 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1103 	} else {
1104 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1105 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1106 	}
1107 
1108 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1109 		tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
1110 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1111 		tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
1112 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1113 		fspec_key->pdst, fspec_key->psrc);
1114 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1115 		fspec_mask->pdst, fspec_mask->psrc);
1116 }
1117 
1118 nxge_status_t
1119 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
1120 	uint32_t *H1, uint16_t *H2)
1121 {
1122 	flow_spec_t *flow_spec;
1123 	uint32_t class_cfg;
1124 	flow_template_t ft;
1125 	p_nxge_class_pt_cfg_t p_class_cfgp;
1126 
1127 	int ft_size = sizeof (flow_template_t);
1128 
1129 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
1130 
1131 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1132 	bzero((char *)&ft, ft_size);
1133 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1134 
1135 	switch (flow_spec->flow_type) {
1136 	case FSPEC_TCPIP4:
1137 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
1138 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1139 			ft.ip_proto = IPPROTO_TCP;
1140 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1141 			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
1142 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1143 			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
1144 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1145 			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
1146 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1147 			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
1148 		break;
1149 
1150 	case FSPEC_UDPIP4:
1151 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
1152 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1153 			ft.ip_proto = IPPROTO_UDP;
1154 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1155 			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
1156 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1157 			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
1158 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1159 			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
1160 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1161 			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
1162 		break;
1163 
1164 	default:
1165 		return (NXGE_ERROR);
1166 	}
1167 
1168 	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
1169 		(uint32_t *)&ft, ft_size) & 0xfffff;
1170 	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
1171 		(uint8_t *)&ft, ft_size);
1172 
1173 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
1174 	return (NXGE_OK);
1175 }
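/*
 * Illustrative note: H1 (masked to 20 bits above) is intended to
 * select the FCRAM bucket and H2 is a 16-bit CRC-CCITT tag for the
 * entry; nxge_add_fcram_entry() below computes both, although FCRAM
 * insertion itself is not implemented in this file.
 */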
1176 
1177 nxge_status_t
1178 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1179 {
1180 	uint32_t H1;
1181 	uint16_t H2;
1182 	nxge_status_t status = NXGE_OK;
1183 
1184 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
1185 	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
1186 	if (status != NXGE_OK) {
1187 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1188 			" nxge_add_fcram_entry failed "));
1189 		return (status);
1190 	}
1191 
1192 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
1193 	return (NXGE_OK);
1194 }
1195 
1196 /*
1197  * Already decided this flow goes into the tcam
1198  */
1199 
1200 nxge_status_t
1201 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1202 {
1203 	npi_handle_t handle;
1204 	intptr_t channel_cookie;
1205 	intptr_t flow_cookie;
1206 	flow_spec_t *flow_spec;
1207 	npi_status_t rs = NPI_SUCCESS;
1208 	tcam_entry_t tcam_ptr;
1209 	tcam_location_t location = 0;
1210 	uint8_t offset, rdc_grp;
1211 	p_nxge_hw_list_t hw_p;
1212 
1213 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
1214 	handle = nxgep->npi_reg_handle;
1215 
1216 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1217 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1218 	flow_cookie = flow_res->flow_cookie;
1219 	channel_cookie = flow_res->channel_cookie;
1220 
1221 	switch (flow_spec->flow_type) {
1222 	case FSPEC_TCPIP4:
1223 		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
1224 		location = nxge_get_tcam_location(nxgep,
1225 			TCAM_CLASS_TCP_IPV4);
1226 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
1227 			flow_cookie);
1228 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
1229 			channel_cookie);
1230 		break;
1231 
1232 	case FSPEC_UDPIP4:
1233 		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
1234 		location = nxge_get_tcam_location(nxgep,
1235 			TCAM_CLASS_UDP_IPV4);
1236 		rdc_grp = nxge_get_rdc_group(nxgep,
1237 			TCAM_CLASS_UDP_IPV4,
1238 			flow_cookie);
1239 		offset = nxge_get_rdc_offset(nxgep,
1240 			TCAM_CLASS_UDP_IPV4,
1241 			channel_cookie);
1242 		break;
1243 
1244 	case FSPEC_TCPIP6:
1245 		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
1246 			flow_spec, &tcam_ptr);
1247 		location = nxge_get_tcam_location(nxgep,
1248 			TCAM_CLASS_TCP_IPV6);
1249 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
1250 			flow_cookie);
1251 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
1252 			channel_cookie);
1253 		break;
1254 
1255 	case FSPEC_UDPIP6:
1256 		nxge_fill_tcam_entry_udp_ipv6(nxgep,
1257 			flow_spec, &tcam_ptr);
1258 		location = nxge_get_tcam_location(nxgep,
1259 			TCAM_CLASS_UDP_IPV6);
1260 		rdc_grp = nxge_get_rdc_group(nxgep,
1261 			TCAM_CLASS_UDP_IPV6,
1262 			flow_cookie);
1263 		offset = nxge_get_rdc_offset(nxgep,
1264 			TCAM_CLASS_UDP_IPV6,
1265 			channel_cookie);
1266 		break;
1267 
1268 	case FSPEC_SCTPIP4:
1269 		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
1270 		location = nxge_get_tcam_location(nxgep,
1271 			TCAM_CLASS_SCTP_IPV4);
1272 		rdc_grp = nxge_get_rdc_group(nxgep,
1273 			TCAM_CLASS_SCTP_IPV4,
1274 			flow_cookie);
1275 		offset = nxge_get_rdc_offset(nxgep,
1276 			TCAM_CLASS_SCTP_IPV4,
1277 			channel_cookie);
1278 		break;
1279 
1280 	case FSPEC_SCTPIP6:
1281 		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
1282 			flow_spec, &tcam_ptr);
1283 		location = nxge_get_tcam_location(nxgep,
1284 			TCAM_CLASS_SCTP_IPV6);
1285 		rdc_grp = nxge_get_rdc_group(nxgep,
1286 			TCAM_CLASS_SCTP_IPV6,
1287 			flow_cookie);
1288 		offset = nxge_get_rdc_offset(nxgep,
1289 			TCAM_CLASS_SCTP_IPV6,
1290 			channel_cookie);
1291 		break;
1292 
1293 	default:
1294 		return (NXGE_OK);
1295 	}
1296 
1297 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1298 		" nxge_add_tcam_entry write"
1299 		" for location %d offset %d", location, offset));
1300 
1301 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1302 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1303 			" nxge_add_tcam_entry: common hardware not set"
1304 			" (niu_type 0x%x)", nxgep->niu_type));
1305 		return (NXGE_ERROR);
1306 	}
1307 
1308 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1309 	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
1310 
1311 	if (rs & NPI_FFLP_ERROR) {
1312 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1313 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1314 			" nxge_add_tcam_entry write"
1315 			" failed for location %d", location));
1316 		return (NXGE_ERROR | rs);
1317 	}
1318 
1319 	tcam_ptr.match_action.value = 0;
1320 	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
1321 	tcam_ptr.match_action.bits.ldw.offset = offset;
1322 	tcam_ptr.match_action.bits.ldw.tres =
1323 		TRES_TERM_OVRD_L2RDC;
1324 	if (channel_cookie == -1)
1325 		tcam_ptr.match_action.bits.ldw.disc = 1;
1326 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1327 		location, tcam_ptr.match_action.value);
1328 	if (rs & NPI_FFLP_ERROR) {
1329 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1330 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1331 			" nxge_add_tcam_entry write"
1332 			" failed for ASC RAM location %d", location));
1333 		return (NXGE_ERROR | rs);
1334 	}
1335 	bcopy((void *) &tcam_ptr,
1336 		(void *) &nxgep->classifier.tcam_entries[location].tce,
1337 		sizeof (tcam_entry_t));
1338 
1339 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1340 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
1341 	return (NXGE_OK);
1342 }
1343 
1344 static nxge_status_t
1345 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
1346 {
1347 	tcam_entry_t tcam_ptr;
1348 	tcam_location_t location;
1349 	uint8_t class;
1350 	uint32_t class_config;
1351 	npi_handle_t handle;
1352 	npi_status_t rs = NPI_SUCCESS;
1353 	p_nxge_hw_list_t hw_p;
1354 	nxge_status_t status = NXGE_OK;
1355 
1356 	handle = nxgep->npi_reg_handle;
1357 	class = 0;
1358 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1359 	tcam_ptr.ip4_noport_key = 1;
1360 	tcam_ptr.ip4_noport_mask = 1;
1361 	location = nxgep->function_num;
1362 	nxgep->classifier.fragment_bug_location = location;
1363 
1364 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1365 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1366 			" nxge_tcam_handle_ip_fragment:"
1367 			" common hardware not set (niu_type 0x%x)",
1368 			nxgep->niu_type));
1369 		return (NXGE_ERROR);
1370 	}
1371 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1372 	rs = npi_fflp_tcam_entry_write(handle,
1373 		location, &tcam_ptr);
1374 
1375 	if (rs & NPI_FFLP_ERROR) {
1376 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1377 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1378 			" nxge_tcam_handle_ip_fragment "
1379 			" tcam_entry write"
1380 			" failed for location %d", location));
1381 		return (NXGE_ERROR);
1382 	}
1383 	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
1384 	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
1385 	tcam_ptr.match_action.bits.ldw.tres =
1386 		TRES_TERM_USE_OFFSET;
1387 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1388 		location, tcam_ptr.match_action.value);
1389 
1390 	if (rs & NPI_FFLP_ERROR) {
1391 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1392 		NXGE_ERROR_MSG((nxgep,
1393 			NXGE_ERR_CTL,
1394 			" nxge_tcam_handle_ip_fragment "
1395 			" tcam_entry write"
1396 			" failed for ASC RAM location %d", location));
1397 		return (NXGE_ERROR);
1398 	}
1399 	bcopy((void *) &tcam_ptr,
1400 		(void *) &nxgep->classifier.tcam_entries[location].tce,
1401 		sizeof (tcam_entry_t));
1402 	for (class = TCAM_CLASS_TCP_IPV4;
1403 		class <= TCAM_CLASS_SCTP_IPV6; class++) {
1404 		class_config = nxgep->class_config.class_cfg[class];
1405 		class_config |= NXGE_CLASS_TCAM_LOOKUP;
1406 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1407 
1408 		if (status & NPI_FFLP_ERROR) {
1409 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1410 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1411 				"nxge_tcam_handle_ip_fragment "
1412 				"nxge_fflp_ip_class_config failed "
1413 				" class %d config %x ", class, class_config));
1414 			return (NXGE_ERROR);
1415 		}
1416 	}
1417 
1418 	rs = npi_fflp_cfg_tcam_enable(handle);
1419 	if (rs & NPI_FFLP_ERROR) {
1420 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1421 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1422 			"nxge_tcam_handle_ip_fragment "
1423 			" nxge_fflp_config_tcam_enable failed"));
1424 		return (NXGE_ERROR);
1425 	}
1426 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1427 	return (NXGE_OK);
1428 }
1429 
1430 /* ARGSUSED */
1431 static int
1432 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
1433 {
1434 	return (0);
1435 }
1436 
1437 nxge_status_t
1438 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
1439 {
1440 
1441 	int insert_hash = 0;
1442 	nxge_status_t status = NXGE_OK;
1443 
1444 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep->niu_type)) {
1445 		/* determine whether to do TCAM or Hash flow */
1446 		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
1447 	}
1448 	if (insert_hash) {
1449 		status = nxge_add_fcram_entry(nxgep, flow_res);
1450 	} else {
1451 		status = nxge_add_tcam_entry(nxgep, flow_res);
1452 	}
1453 	return (status);
1454 }
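/*
 * Illustrative only: a caller steering TCP port 80 traffic for one
 * IPv4 destination to RDC offset 2 might fill a flow_resource_t
 * roughly as follows (field widths and byte order here are
 * assumptions, not taken from this file):
 *
 *	flow_resource_t fr;
 *
 *	bzero(&fr, sizeof (fr));
 *	fr.flow_spec.flow_type = FSPEC_TCPIP4;
 *	fr.flow_spec.uh.tcpip4spec.ip4dst = dst_ip;
 *	fr.flow_spec.um.tcpip4spec.ip4dst = 0xffffffff;
 *	fr.flow_spec.uh.tcpip4spec.pdst = htons(80);
 *	fr.flow_spec.um.tcpip4spec.pdst = 0xffff;
 *	fr.channel_cookie = 2;		(RDC offset within the group)
 *	fr.flow_cookie = 0;		(RDC group selector)
 *	(void) nxge_add_flow(nxgep, &fr);
 */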
1455 
1456 void
1457 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
1458 {
1459 	flow_resource_t *fs;
1460 
1461 	fs = (flow_resource_t *)mp->b_rptr;
1462 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1463 		"nxge_put_tcam addr fs $%p  type %x offset %x",
1464 		fs, fs->flow_spec.flow_type, fs->channel_cookie));
1465 	(void) nxge_add_tcam_entry(nxgep, fs);
1466 }
1467 
1468 nxge_status_t
1469 nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
1470 {
1471 	npi_handle_t handle = nxgep->npi_reg_handle;
1472 	npi_status_t rs = NPI_SUCCESS;
1473 
1474 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
1475 	rs = npi_fflp_cfg_tcam_enable(handle);
1476 	if (rs & NPI_FFLP_ERROR) {
1477 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1478 			" nxge_fflp_config_tcam_enable failed"));
1479 		return (NXGE_ERROR | rs);
1480 	}
1481 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
1482 	return (NXGE_OK);
1483 }
1484 
1485 nxge_status_t
1486 nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
1487 {
1488 	npi_handle_t handle = nxgep->npi_reg_handle;
1489 	npi_status_t rs = NPI_SUCCESS;
1490 
1491 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1492 		" ==> nxge_fflp_config_tcam_disable"));
1493 	rs = npi_fflp_cfg_tcam_disable(handle);
1494 	if (rs & NPI_FFLP_ERROR) {
1495 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1496 				" nxge_fflp_config_tcam_disable failed"));
1497 		return (NXGE_ERROR | rs);
1498 	}
1499 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1500 		" <== nxge_fflp_config_tcam_disable"));
1501 	return (NXGE_OK);
1502 }
1503 
1504 nxge_status_t
1505 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
1506 {
1507 	npi_handle_t handle = nxgep->npi_reg_handle;
1508 	npi_status_t rs = NPI_SUCCESS;
1509 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1510 	p_nxge_hw_pt_cfg_t p_cfgp;
1511 	uint8_t partition;
1512 
1513 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1514 		" ==> nxge_fflp_config_hash_lookup_enable"));
1515 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1516 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1517 
1518 	for (partition = p_cfgp->start_rdc_grpid;
1519 		partition < p_cfgp->max_rdc_grpids; partition++) {
1520 		rs = npi_fflp_cfg_fcram_partition_enable(handle, partition);
1521 		if (rs != NPI_SUCCESS) {
1522 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1523 				" nxge_fflp_config_hash_lookup_enable"
1524 				" failed FCRAM partition"
1525 				" enable for partition %d ", partition));
1526 			return (NXGE_ERROR | rs);
1527 		}
1528 	}
1529 
1530 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1531 		" <== nxge_fflp_config_hash_lookup_enable"));
1532 	return (NXGE_OK);
1533 }
1534 
1535 nxge_status_t
1536 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
1537 {
1538 	npi_handle_t handle = nxgep->npi_reg_handle;
1539 	npi_status_t rs = NPI_SUCCESS;
1540 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1541 	p_nxge_hw_pt_cfg_t p_cfgp;
1542 	uint8_t partition;
1543 
1544 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1545 		" ==> nxge_fflp_config_hash_lookup_disable"));
1546 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1547 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1548 
1549 	for (partition = p_cfgp->start_rdc_grpid;
1550 		partition < p_cfgp->max_rdc_grpids; partition++) {
1551 		rs = npi_fflp_cfg_fcram_partition_disable(handle,
1552 			partition);
1553 		if (rs != NPI_SUCCESS) {
1554 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1555 				" nxge_fflp_config_hash_lookup_disable"
1556 				" failed FCRAM partition"
1557 				" disable for partition %d ", partition));
1558 			return (NXGE_ERROR | rs);
1559 		}
1560 	}
1561 
1562 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1563 		" <== nxge_fflp_config_hash_lookup_disable"));
1564 	return (NXGE_OK);
1565 }
1566 
1567 nxge_status_t
1568 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
1569 {
1570 	npi_handle_t handle = nxgep->npi_reg_handle;
1571 	npi_status_t rs = NPI_SUCCESS;
1572 
1573 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1574 		" ==> nxge_fflp_config_llc_snap_enable"));
1575 	rs = npi_fflp_cfg_llcsnap_enable(handle);
1576 	if (rs & NPI_FFLP_ERROR) {
1577 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1578 			" nxge_fflp_config_llc_snap_enable failed"));
1579 		return (NXGE_ERROR | rs);
1580 	}
1581 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1582 		" <== nxge_fflp_config_llc_snap_enable"));
1583 	return (NXGE_OK);
1584 }
1585 
1586 nxge_status_t
1587 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
1588 {
1589 	npi_handle_t handle = nxgep->npi_reg_handle;
1590 	npi_status_t rs = NPI_SUCCESS;
1591 
1592 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1593 		" ==> nxge_fflp_config_llc_snap_disable"));
1594 	rs = npi_fflp_cfg_llcsnap_disable(handle);
1595 	if (rs & NPI_FFLP_ERROR) {
1596 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1597 			" nxge_fflp_config_llc_snap_disable failed"));
1598 		return (NXGE_ERROR | rs);
1599 	}
1600 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1601 		" <== nxge_fflp_config_llc_snap_disable"));
1602 	return (NXGE_OK);
1603 }
1604 
1605 nxge_status_t
1606 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
1607 	uint32_t config)
1608 {
1609 	npi_status_t rs = NPI_SUCCESS;
1610 	npi_handle_t handle = nxgep->npi_reg_handle;
1611 	uint8_t tos, tos_mask, proto, ver = 0;
1612 	uint8_t class_enable = 0;
1613 
1614 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
1615 
1616 	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
1617 		NXGE_CLASS_CFG_IP_TOS_SHIFT;
1618 	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
1619 		NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
1620 	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
1621 		NXGE_CLASS_CFG_IP_PROTO_SHIFT;
1622 	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
1623 		ver = 1;
1624 	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
1625 		class_enable = 1;
1626 	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
1627 		proto, ver);
1628 	if (rs & NPI_FFLP_ERROR) {
1629 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1630 			" nxge_fflp_ip_usr_class_config"
1631 			" for class %d failed ", class));
1632 		return (NXGE_ERROR | rs);
1633 	}
1634 	if (class_enable)
1635 		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
1636 	else
1637 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
1638 
1639 	if (rs & NPI_FFLP_ERROR) {
1640 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1641 			" nxge_fflp_ip_usr_class_config"
1642 			" TCAM enable/disable for class %d failed ", class));
1643 		return (NXGE_ERROR | rs);
1644 	}
1645 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
1646 	return (NXGE_OK);
1647 }
1648 
1649 nxge_status_t
1650 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
1651 {
1652 	uint32_t class_config;
1653 	nxge_status_t t_status = NXGE_OK;
1654 	nxge_status_t f_status = NXGE_OK;
1655 	p_nxge_class_pt_cfg_t p_class_cfgp;
1656 
1657 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1658 
1659 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1660 	class_config = p_class_cfgp->class_cfg[class];
1661 
1662 	if (class_config != config) {
1663 		p_class_cfgp->class_cfg[class] = config;
1664 		class_config = config;
1665 	}
1666 
1667 	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
1668 	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
1669 
1670 	if (t_status & NPI_FFLP_ERROR) {
1671 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1672 			" nxge_fflp_ip_class_config %x"
1673 			" for class %d tcam failed", config, class));
1674 		return (t_status);
1675 	}
1676 	if (f_status & NPI_FFLP_ERROR) {
1677 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1678 			" nxge_fflp_ip_class_config %x"
1679 			" for class %d flow key failed", config, class));
1680 		return (f_status);
1681 	}
1682 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1683 	return (NXGE_OK);
1684 }
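/*
 * Illustrative only: a per-class configuration word combines the TCAM
 * flags with the flow-key flags handled earlier in this file, e.g.
 * enabling TCAM lookup for TCP/IPv4 while hashing on the address pair:
 *
 *	uint32_t cfg = NXGE_CLASS_TCAM_LOOKUP |
 *	    NXGE_CLASS_FLOW_USE_IPSRC | NXGE_CLASS_FLOW_USE_IPDST;
 *	(void) nxge_fflp_ip_class_config(nxgep, TCAM_CLASS_TCP_IPV4, cfg);
 */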
1685 
1686 nxge_status_t
1687 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
1688 	uint32_t *config)
1689 {
1690 	uint32_t t_class_config, f_class_config;
1691 	int t_status = NXGE_OK;
1692 	int f_status = NXGE_OK;
1693 
1694 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config_get"));
1695 
1696 	t_class_config = f_class_config = 0;
1697 	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
1698 	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
1699 
1700 	if (t_status & NPI_FFLP_ERROR) {
1701 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1702 			" nxge_fflp_ip_class_config_get  "
1703 			" for class %d tcam failed", class));
1704 		return (t_status);
1705 	}
1706 
1707 	if (f_status & NPI_FFLP_ERROR) {
1708 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1709 			" nxge_fflp_ip_class_config_get  "
1710 			" for class %d flow key failed", class));
1711 		return (f_status);
1712 	}
1713 
1714 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1715 		" nxge_fflp_ip_class_config tcam %x flow %x",
1716 		t_class_config, f_class_config));
1717 
1718 	*config = t_class_config | f_class_config;
1719 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
1720 	return (NXGE_OK);
1721 }
1722 
1723 nxge_status_t
1724 nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
1725 {
1726 	uint32_t class_config;
1727 	tcam_class_t class;
1728 
1729 #ifdef	NXGE_DEBUG
1730 	int status = NXGE_OK;
1731 #endif
1732 
1733 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config_all"));
1734 	for (class = TCAM_CLASS_TCP_IPV4;
1735 		class <= TCAM_CLASS_SCTP_IPV6; class++) {
1736 		class_config = nxgep->class_config.class_cfg[class];
1737 #ifndef	NXGE_DEBUG
1738 		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
1739 #else
1740 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1741 		if (status != NXGE_OK) {
1742 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1743 				"nxge_fflp_ip_class_config failed "
1744 				" class %d config %x ",
1745 				class, class_config));
1746 		}
1747 #endif
1748 	}
1749 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_all"));
1750 	return (NXGE_OK);
1751 }
1752 
1753 nxge_status_t
1754 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
1755 {
1756 	uint8_t port, rdc_grp;
1757 	npi_handle_t handle;
1758 	npi_status_t rs = NPI_SUCCESS;
1759 	uint8_t priority = 1;
1760 	p_nxge_mv_cfg_t vlan_table;
1761 	p_nxge_class_pt_cfg_t p_class_cfgp;
1762 	p_nxge_hw_list_t hw_p;
1763 
1764 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
1765 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1766 	handle = nxgep->npi_reg_handle;
1767 	vlan_table = p_class_cfgp->vlan_tbl;
1768 	port = nxgep->function_num;
1769 
1770 	if (vlan_table[vlan_id].flag == 0) {
1771 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1772 			" nxge_fflp_config_vlan_table"
1773 			" vlan id is not configured %d", vlan_id));
1774 		return (NXGE_ERROR);
1775 	}
1776 
1777 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1778 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1779 			" nxge_fflp_config_vlan_table:"
1780 			" common hardware not set, niu type 0x%x", nxgep->niu_type));
1781 		return (NXGE_ERROR);
1782 	}
1783 	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
1784 	rdc_grp = vlan_table[vlan_id].rdctbl;
1785 	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
1786 		port, vlan_id,
1787 		rdc_grp, priority);
1788 
1789 	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
1790 	if (rs & NPI_FFLP_ERROR) {
1791 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1792 			"nxge_fflp_config_vlan_table failed "
1793 			" Port %d vlan_id %d rdc_grp %d",
1794 			port, vlan_id, rdc_grp));
1795 		return (NXGE_ERROR | rs);
1796 	}
1797 
1798 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
1799 	return (NXGE_OK);
1800 }
1801 
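/*
 * Sketch (assumption, not driver code): nxge_fflp_config_vlan_table()
 * expects the VLAN entry to be marked in the software table first
 * (flag set and an RDC group assigned); otherwise it fails as above.
 * The VLAN id 10 and RDC group 0 are arbitrary example values.
 *
 *	p_nxge_class_pt_cfg_t cp = &nxgep->class_config;
 *	cp->vlan_tbl[10].flag = 1;
 *	cp->vlan_tbl[10].rdctbl = 0;
 *	(void) nxge_fflp_config_vlan_table(nxgep, 10);
 */
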
1802 nxge_status_t
1803 nxge_fflp_update_hw(p_nxge_t nxgep)
1804 {
1805 	nxge_status_t status = NXGE_OK;
1806 	p_nxge_param_t pa;
1807 	uint64_t cfgd_vlans;
1808 	uint64_t *val_ptr;
1809 	int i;
1810 	int num_macs;
1811 	uint8_t alt_mac;
1812 	nxge_param_map_t *p_map;
1813 	p_nxge_mv_cfg_t vlan_table;
1814 	p_nxge_class_pt_cfg_t p_class_cfgp;
1815 	p_nxge_dma_pt_cfg_t p_all_cfgp;
1816 	p_nxge_hw_pt_cfg_t p_cfgp;
1817 
1818 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
1819 
1820 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1821 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1822 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1823 
1824 	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
1825 	if (status != NXGE_OK) {
1826 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1827 			"nxge_fflp_set_hash1 Failed"));
1828 		return (NXGE_ERROR);
1829 	}
1830 
1831 	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
1832 	if (status != NXGE_OK) {
1833 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1834 			"nxge_fflp_set_hash2 Failed"));
1835 		return (NXGE_ERROR);
1836 	}
1837 	vlan_table = p_class_cfgp->vlan_tbl;
1838 
1839 	/* configure vlan tables */
1840 	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
1841 	val_ptr = (uint64_t *)pa->value;
1842 	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
1843 		NXGE_PARAM_ARRAY_CNT_SHIFT);
1844 
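	/*
	 * The number of configured VLAN entries is packed into the
	 * upper bits of the parameter's type word (extracted above);
	 * each element of the value array is a nxge_param_map_t whose
	 * param_id names the VLAN id to program.
	 */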
1845 	for (i = 0; i < cfgd_vlans; i++) {
1846 		p_map = (nxge_param_map_t *)&val_ptr[i];
1847 		if (vlan_table[p_map->param_id].flag) {
1848 			status = nxge_fflp_config_vlan_table(nxgep,
1849 				p_map->param_id);
1850 			if (status != NXGE_OK) {
1851 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1852 					"nxge_fflp_config_vlan_table Failed"));
1853 				return (NXGE_ERROR);
1854 			}
1855 		}
1856 	}
1857 
1858 	/* config MAC addresses */
1859 	num_macs = p_cfgp->max_macs;
1860 	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
1861 	val_ptr = (uint64_t *)pa->value;
1862 
1863 	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
1864 		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
1865 			status = nxge_logical_mac_assign_rdc_table(nxgep,
1866 				alt_mac);
1867 			if (status != NXGE_OK) {
1868 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1869 					"nxge_logical_mac_assign_rdc_table"
1870 					" Failed"));
1871 				return (NXGE_ERROR);
1872 			}
1873 		}
1874 	}
1875 
1876 	/* Hash values were configured above */
1877 	/* Configure the IP classes */
1878 	status = nxge_fflp_ip_class_config_all(nxgep);
1879 	if (status != NXGE_OK) {
1880 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1881 			"nxge_fflp_ip_class_config_all Failed"));
1882 		return (NXGE_ERROR);
1883 	}
1884 	return (NXGE_OK);
1885 }
1886 
1887 nxge_status_t
1888 nxge_classify_init_hw(p_nxge_t nxgep)
1889 {
1890 	nxge_status_t status = NXGE_OK;
1891 
1892 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
1893 
1894 	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
1895 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1896 			"nxge_classify_init_hw already init"));
1897 		return (NXGE_OK);
1898 	}
1899 
1900 	/* Now do a real configuration */
1901 	status = nxge_fflp_update_hw(nxgep);
1902 	if (status != NXGE_OK) {
1903 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1904 			"nxge_fflp_update_hw failed"));
1905 		return (NXGE_ERROR);
1906 	}
1907 
1908 	/* Init RDC tables? Open question: should rxdma or fflp own this? */
1909 	/* Attach the RDC table to the MAC port. */
1910 	status = nxge_main_mac_assign_rdc_table(nxgep);
1911 	if (status != NXGE_OK) {
1912 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1913 				"nxge_main_mac_assign_rdc_table failed"));
1914 		return (NXGE_ERROR);
1915 	}
1916 
1917 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
1918 	if (status != NXGE_OK) {
1919 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1920 			"nxge_alt_mcast_mac_assign_rdc_table failed"));
1921 		return (NXGE_ERROR);
1922 	}
1923 
1924 	status = nxge_tcam_handle_ip_fragment(nxgep);
1925 	if (status != NXGE_OK) {
1926 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1927 			"nxge_tcam_handle_ip_fragment failed"));
1928 		return (NXGE_ERROR);
1929 	}
1930 
1931 	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
1932 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
1933 	return (NXGE_OK);
1934 }
1935 
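/*
 * Sketch (assumption, not driver code): callers can use the classifier
 * state flag to keep hardware classification setup idempotent,
 * mirroring the check at the top of nxge_classify_init_hw():
 *
 *	if (!(nxgep->classifier.state & NXGE_FFLP_HW_INIT))
 *		(void) nxge_classify_init_hw(nxgep);
 */
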
1936 nxge_status_t
1937 nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
1938 {
1939 	npi_handle_t handle;
1940 	p_nxge_fflp_stats_t statsp;
1941 	uint8_t portn, rdc_grp;
1942 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1943 	p_nxge_hw_pt_cfg_t p_cfgp;
1944 	vlan_par_err_t vlan_err;
1945 	tcam_err_t tcam_err;
1946 	hash_lookup_err_log1_t fcram1_err;
1947 	hash_lookup_err_log2_t fcram2_err;
1948 	hash_tbl_data_log_t fcram_err;
1949 
1950 	handle = nxgep->npi_handle;
1951 	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
1952 	portn = nxgep->mac.portnum;
1953 
1954 	/*
1955 	 * Read the FFLP error registers to determine
1956 	 * what the error is.
1957 	 */
1958 	npi_fflp_vlan_error_get(handle, &vlan_err);
1959 	npi_fflp_tcam_error_get(handle, &tcam_err);
1960 
1961 	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
1962 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1963 			" vlan table parity error on port %d"
1964 			" addr: 0x%x data: 0x%x",
1965 			portn, vlan_err.bits.ldw.addr,
1966 			vlan_err.bits.ldw.data));
1967 		statsp->vlan_parity_err++;
1968 
1969 		if (vlan_err.bits.ldw.m_err) {
1970 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1971 				" vlan table multiple errors on port %d",
1972 				portn));
1973 		}
1974 		statsp->errlog.vlan = (uint32_t)vlan_err.value;
1975 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
1976 			NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
1977 		npi_fflp_vlan_error_clear(handle);
1978 	}
1979 
1980 	if (tcam_err.bits.ldw.err) {
1981 		if (tcam_err.bits.ldw.p_ecc != 0) {
1982 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1983 				" TCAM ECC error on port %d"
1984 				" TCAM entry: 0x%x syndrome: 0x%x",
1985 				portn, tcam_err.bits.ldw.addr,
1986 				tcam_err.bits.ldw.syndrome));
1987 			statsp->tcam_ecc_err++;
1988 		} else {
1989 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1990 				" TCAM Parity error on port %d"
1991 				" addr: 0x%x parity value: 0x%x",
1992 				portn, tcam_err.bits.ldw.addr,
1993 				tcam_err.bits.ldw.syndrome));
1994 			statsp->tcam_parity_err++;
1995 		}
1996 
1997 		if (tcam_err.bits.ldw.mult) {
1998 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
1999 				" TCAM Multiple errors on port %d", portn));
2000 		} else {
2001 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2002 					" TCAM PIO error on port %d",
2003 					portn));
2004 		}
2005 
2006 		statsp->errlog.tcam = (uint32_t)tcam_err.value;
2007 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2008 			NXGE_FM_EREPORT_FFLP_TCAM_ERR);
2009 		npi_fflp_tcam_error_clear(handle);
2010 	}
2011 
2012 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2013 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2014 
2015 	for (rdc_grp = p_cfgp->start_rdc_grpid;
2016 		rdc_grp < p_cfgp->max_rdc_grpids; rdc_grp++) {
2017 		npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
2018 		if (fcram_err.bits.ldw.pio_err) {
2019 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2020 				" FCRAM PIO ECC error on port %d"
2021 				" rdc group: %d Hash Table addr: 0x%x"
2022 				" syndrome: 0x%x",
2023 				portn, rdc_grp,
2024 				fcram_err.bits.ldw.fcram_addr,
2025 				fcram_err.bits.ldw.syndrome));
2026 			statsp->hash_pio_err[rdc_grp]++;
2027 			statsp->errlog.hash_pio[rdc_grp] =
2028 				(uint32_t)fcram_err.value;
2029 			NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2030 				NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
2031 			npi_fflp_fcram_error_clear(handle, rdc_grp);
2032 		}
2033 	}
2034 
2035 	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
2036 	if (fcram1_err.bits.ldw.ecc_err) {
2037 		char *multi_str = "";
2038 		char *multi_bit_str = "";
2039 
2040 		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
2041 		if (fcram1_err.bits.ldw.mult_lk) {
2042 			multi_str = "multiple";
2043 		}
2044 		if (fcram1_err.bits.ldw.mult_bit) {
2045 			multi_bit_str = "multiple bits";
2046 		}
2047 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2048 			" FCRAM %s lookup %s ECC error on port %d"
2049 			" H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
2050 			multi_str, multi_bit_str, portn,
2051 			fcram2_err.bits.ldw.h1,
2052 			fcram2_err.bits.ldw.subarea,
2053 			fcram2_err.bits.ldw.syndrome));
2054 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2055 			NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
2056 		statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
2057 	}
2058 	statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
2059 	return (NXGE_OK);
2060 }
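
/*
 * Note on the error-register unions used above (illustrative sketch,
 * not driver code): npi_fflp_vlan_error_get() and the related calls
 * fill unions that expose the raw register as .value and the decoded
 * fields under .bits.ldw, so a caller can test individual bits and
 * still log the whole register.  The handle below is assumed to be
 * the NPI handle used in this file.
 *
 *	vlan_par_err_t err;
 *	npi_fflp_vlan_error_get(handle, &err);
 *	if (err.bits.ldw.err)
 *		cmn_err(CE_WARN, "vlan parity error at 0x%x",
 *		    err.bits.ldw.addr);
 */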
2061