xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fflp.c (revision 4df55fde)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <npi_fflp.h>
27 #include <npi_mac.h>
28 #include <nxge_defs.h>
29 #include <nxge_flow.h>
30 #include <nxge_fflp.h>
31 #include <nxge_impl.h>
32 #include <nxge_fflp_hash.h>
33 #include <nxge_common.h>
34 
35 
36 /*
37  * Function prototypes
38  */
39 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
40 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
41 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
42 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
43 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
44 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
45 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
46 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
47 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
48 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
49 	tcam_entry_t *);
50 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
51 	tcam_entry_t *);
52 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
53 	tcam_entry_t *);
54 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, uint64_t);
55 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, uint64_t);
56 static uint16_t nxge_tcam_get_index(p_nxge_t, uint16_t);
57 static uint32_t nxge_tcam_cls_to_flow(uint32_t);
58 static uint8_t nxge_iptun_pkt_type_to_pid(uint8_t);
59 static npi_status_t nxge_set_iptun_usr_cls_reg(p_nxge_t, uint64_t,
60 					iptun_cfg_t *);
61 static boolean_t nxge_is_iptun_cls_present(p_nxge_t, uint8_t, int *);
62 
63 /*
64  * functions used outside this file
65  */
66 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
67 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
68 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
69 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
70 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
71 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
72 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
73 	uint32_t *, uint16_t *);
74 int nxge_get_valid_tcam_cnt(p_nxge_t);
75 void nxge_get_tcam_entry_all(p_nxge_t, rx_class_cfg_t *);
76 void nxge_get_tcam_entry(p_nxge_t, flow_resource_t *);
77 void nxge_del_tcam_entry(p_nxge_t, uint32_t);
78 void nxge_add_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t *);
79 void nxge_cfg_iptun_hash(p_nxge_t, iptun_cfg_t *, uint8_t);
80 void nxge_del_iptun_class(p_nxge_t, uint8_t);
81 void nxge_get_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t);
82 void nxge_set_ip_cls_sym(p_nxge_t, uint8_t, uint8_t);
83 void nxge_get_ip_cls_sym(p_nxge_t, uint8_t, uint8_t *);
84 
85 
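/*
 * Note on return values: the routines in this file return NXGE_OK on
 * success.  When an NPI call fails, the NPI status is folded into the
 * return value as (NXGE_ERROR | rs), so callers can still recover the
 * underlying NPI error code from the low-order bits.
 */
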
86 nxge_status_t
87 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
88 {
89 	tcam_entry_t tcam_rdptr;
90 	uint64_t asc_ram = 0;
91 	npi_handle_t handle;
92 	npi_status_t status;
93 
94 	handle = nxgep->npi_reg_handle;
95 
96 	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
97 	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
98 	    (struct tcam_entry *)&tcam_rdptr);
99 	if (status & NPI_FAILURE) {
100 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
101 		    " nxge_tcam_dump_entry:"
102 		    "  tcam read failed at location %d ", location));
103 		return (NXGE_ERROR);
104 	}
105 	status = npi_fflp_tcam_asc_ram_entry_read(handle,
106 	    (tcam_location_t)location, &asc_ram);
107 
108 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
109 	    " key:  %llx %llx %llx %llx \n"
110 	    " mask: %llx %llx %llx %llx \n"
111 	    " ASC RAM %llx \n", location,
112 	    tcam_rdptr.key0, tcam_rdptr.key1,
113 	    tcam_rdptr.key2, tcam_rdptr.key3,
114 	    tcam_rdptr.mask0, tcam_rdptr.mask1,
115 	    tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
116 	return (NXGE_OK);
117 }
118 
119 void
120 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
121 {
122 	uint32_t tcam_loc;
123 	int *lptr;
124 	int location;
125 
126 	uint32_t start_location = 0;
127 	uint32_t stop_location = nxgep->classifier.tcam_size;
128 	lptr = (int *)mp->b_rptr;
129 	location = *lptr;
130 
131 	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
132 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
133 		    "nxge_tcam_dump: Invalid location %d \n", location));
134 		return;
135 	}
136 	if (location == -1) {
137 		start_location = 0;
138 		stop_location = nxgep->classifier.tcam_size;
139 	} else {
140 		start_location = location;
141 		stop_location = location + 1;
142 	}
143 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
144 		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
145 }
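
/*
 * Note: nxge_get_tcam() expects the first int at mp->b_rptr to be a
 * TCAM location.  A value of -1 dumps every entry from 0 up to
 * classifier.tcam_size - 1; any other valid value dumps just that one
 * entry via nxge_tcam_dump_entry().  (Presumably this is reached from
 * a debug/ioctl path that hands the location down in an mblk.)
 */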
146 
147 /*
148  * nxge_fflp_vlan_tbl_clear_all
149  * Clears (invalidates) all of the VLAN RDC table entries.
150  * INPUT
151  * nxge    soft state data structure
152  * Return
153  *      NXGE_OK
154  *      NXGE_ERROR
155  *
156  */
157 
158 static nxge_status_t
159 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
160 {
161 	vlan_id_t vlan_id;
162 	npi_handle_t handle;
163 	npi_status_t rs = NPI_SUCCESS;
164 	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
165 
166 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
167 	handle = nxgep->npi_reg_handle;
168 	for (vlan_id = start; vlan_id < stop; vlan_id++) {
169 		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
170 		if (rs != NPI_SUCCESS) {
171 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
172 			    "VLAN Table invalidate failed for vlan id %d ",
173 			    vlan_id));
174 			return (NXGE_ERROR | rs);
175 		}
176 	}
177 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
178 	return (NXGE_OK);
179 }
180 
181 /*
182  * The following functions are used by other modules to initialize
183  * the fflp module.  They form the basic API used to initialize
184  * the fflp blocks (TCAM, FCRAM, etc.).
185  *
186  * The TCAM search feature is disabled by default.
187  *
188  */
189 
190 static nxge_status_t
191 nxge_fflp_tcam_init(p_nxge_t nxgep)
192 {
193 	uint8_t access_ratio;
194 	tcam_class_t class;
195 	npi_status_t rs = NPI_SUCCESS;
196 	npi_handle_t handle;
197 
198 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
199 	handle = nxgep->npi_reg_handle;
200 
201 	rs = npi_fflp_cfg_tcam_disable(handle);
202 	if (rs != NPI_SUCCESS) {
203 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
204 		return (NXGE_ERROR | rs);
205 	}
206 
207 	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
208 	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
209 	if (rs != NPI_SUCCESS) {
210 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
211 		    "failed TCAM Access cfg\n"));
212 		return (NXGE_ERROR | rs);
213 	}
214 
215 	/* disable the configurable ethernet classes */
217 	for (class = TCAM_CLASS_ETYPE_1;
218 	    class <= TCAM_CLASS_ETYPE_2; class++) {
219 		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
220 		if (rs != NPI_SUCCESS) {
221 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
222 			    "TCAM USR Ether Class config failed."));
223 			return (NXGE_ERROR | rs);
224 		}
225 	}
226 
227 	/* disable the configurable ip classes; */
228 	for (class = TCAM_CLASS_IP_USER_4;
229 	    class <= TCAM_CLASS_IP_USER_7; class++) {
230 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
231 		if (rs != NPI_SUCCESS) {
232 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
233 			    "TCAM USR IP Class config failed."));
234 			return (NXGE_ERROR | rs);
235 		}
236 	}
237 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
238 	return (NXGE_OK);
239 }
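
/*
 * Note: nxge_fflp_tcam_init() leaves the TCAM in a known, quiescent
 * state: the TCAM search is disabled, the access ratio is programmed
 * from param_arr[param_tcam_access_ratio], and the programmable
 * Ethernet (ETYPE_1/ETYPE_2) and IP user (IP_USER_4..IP_USER_7)
 * classes are disabled.  The user classes stay disabled until they
 * are programmed later, for example by nxge_add_tcam_entry() for
 * FSPEC_IP_USR flows.
 */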
240 
241 /*
242  * nxge_fflp_tcam_invalidate_all
243  * invalidates all the tcam entries.
244  * INPUT
245  * nxge    soft state data structure
246  * Return
247  *      NXGE_OK
248  *      NXGE_ERROR
249  *
250  */
251 
252 
253 static nxge_status_t
254 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
255 {
256 	uint16_t location;
257 	npi_status_t rs = NPI_SUCCESS;
258 	npi_handle_t handle;
259 	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
260 	p_nxge_hw_list_t hw_p;
261 
262 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
263 	    "==> nxge_fflp_tcam_invalidate_all"));
264 	handle = nxgep->npi_reg_handle;
265 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
266 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
267 		    " nxge_fflp_tcam_invalidate_all:"
268 		    " common hardware not set"));
269 		return (NXGE_ERROR);
270 	}
271 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
272 	for (location = start; location < stop; location++) {
273 		rs = npi_fflp_tcam_entry_invalidate(handle, location);
274 		if (rs != NPI_SUCCESS) {
275 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
276 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
277 			    "TCAM invalidate failed at loc %d ", location));
278 			return (NXGE_ERROR | rs);
279 		}
280 	}
281 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
282 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
283 	    "<== nxge_fflp_tcam_invalidate_all"));
284 	return (NXGE_OK);
285 }
286 
287 /*
288  * nxge_fflp_fcram_entry_invalidate_all
289  * invalidates all the FCRAM entries.
290  * INPUT
291  * nxge    soft state data structure
292  * Return
293  *      NXGE_OK
294  *      NXGE_ERROR
295  *
296  */
297 
298 static nxge_status_t
299 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
300 {
301 	npi_handle_t handle;
302 	npi_status_t rs = NPI_SUCCESS;
303 	part_id_t pid = 0;
304 	uint8_t base_mask, base_reloc;
305 	fcram_entry_t fc;
306 	uint32_t location;
307 	uint32_t increment, last_location;
308 
309 	/*
310 	 * (1) configure and enable partition 0 with no relocation
311 	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
312 	 * (3) Invalidate these cells by clearing the valid bit in
313 	 * the subareas 0 and 4
314 	 * (4) disable the partition
315 	 *
316 	 */
317 
318 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
319 
320 	base_mask = base_reloc = 0x0;
321 	handle = nxgep->npi_reg_handle;
322 	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
323 
324 	if (rs != NPI_SUCCESS) {
325 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
326 		return (NXGE_ERROR | rs);
327 	}
328 	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
329 
330 	if (rs != NPI_SUCCESS) {
331 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
332 		    "failed partition disable\n"));
333 		return (NXGE_ERROR | rs);
334 	}
335 	fc.dreg[0].value = 0;
336 	fc.hash_hdr_valid = 0;
337 	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
338 	increment = sizeof (hash_ipv4_t);
339 	last_location = FCRAM_SIZE * 0x40;
340 
341 	for (location = 0; location < last_location; location += increment) {
342 		rs = npi_fflp_fcram_subarea_write(handle, pid,
343 		    location, fc.value[0]);
344 		if (rs != NPI_SUCCESS) {
345 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
346 			    "failed write at location %x ", location));
347 			return (NXGE_ERROR | rs);
348 		}
349 	}
350 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
351 	return (NXGE_OK);
352 }
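
/*
 * Note: the invalidation loop above walks the FCRAM in
 * sizeof (hash_ipv4_t) steps across FCRAM_SIZE * 0x40 bytes and writes
 * a header word with hash_hdr_valid clear to the first subarea of each
 * cell; clearing that valid bit is what invalidates the cell (the
 * subareas 0 and 4 mentioned in the block comment above).
 */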
353 
354 static nxge_status_t
355 nxge_fflp_fcram_init(p_nxge_t nxgep)
356 {
357 	fflp_fcram_output_drive_t strength;
358 	fflp_fcram_qs_t qs;
359 	npi_status_t rs = NPI_SUCCESS;
360 	uint8_t access_ratio;
361 	int partition;
362 	npi_handle_t handle;
363 	uint32_t min_time, max_time, sys_time;
364 
365 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
366 
367 	/*
368 	 * Recommended values are needed.
369 	 */
370 	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
371 	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
372 	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
373 
374 	handle = nxgep->npi_reg_handle;
375 	strength = FCRAM_OUTDR_NORMAL;
376 	qs = FCRAM_QS_MODE_QS;
377 	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
378 	if (rs != NPI_SUCCESS) {
379 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
380 		return (NXGE_ERROR | rs);
381 	}
382 
383 	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
384 	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
385 	if (rs != NPI_SUCCESS) {
386 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
387 		    " configuration\n"));
388 		return (NXGE_ERROR | rs);
389 	}
390 	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
391 	    max_time, sys_time);
392 	if (rs != NPI_SUCCESS) {
393 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
394 		    "failed FCRAM refresh cfg"));
395 		return (NXGE_ERROR);
396 	}
397 
398 	/* disable all the partitions until explicitly enabled */
399 	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
400 		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
401 		if (rs != NPI_SUCCESS) {
402 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
403 			    "failed FCRAM partition"
404 			    " disable for partition %d ", partition));
405 			return (NXGE_ERROR | rs);
406 		}
407 	}
408 
409 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
410 	return (NXGE_OK);
411 }
412 
413 nxge_status_t
414 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
415 {
416 	npi_status_t rs = NPI_SUCCESS;
417 	hostinfo_t mac_rdc;
418 	npi_handle_t handle;
419 	p_nxge_class_pt_cfg_t p_class_cfgp;
420 
421 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
422 	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
423 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
424 		    " nxge_logical_mac_assign_rdc_table"
425 		    " unconfigured alt MAC addr %d ", alt_mac));
426 		return (NXGE_ERROR);
427 	}
428 	handle = nxgep->npi_reg_handle;
429 	mac_rdc.value = 0;
430 	mac_rdc.bits.w0.rdc_tbl_num =
431 	    p_class_cfgp->mac_host_info[alt_mac].rdctbl;
432 	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
433 
434 	rs = npi_mac_hostinfo_entry(handle, OP_SET,
435 	    nxgep->function_num, alt_mac, &mac_rdc);
436 
437 	if (rs != NPI_SUCCESS) {
438 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
439 		    "failed Assign RDC table"));
440 		return (NXGE_ERROR | rs);
441 	}
442 	return (NXGE_OK);
443 }
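
/*
 * Note: a hostinfo entry ties a MAC address slot to an RDC table:
 * rdc_tbl_num selects the receive DMA group and mac_pref (taken from
 * the configured mpr_npr value) marks the entry as MAC-preferred.
 * nxge_logical_mac_assign_rdc_table() programs one alternate-MAC slot;
 * nxge_main_mac_assign_rdc_table() below programs the unique (primary)
 * MAC slot of this function's XMAC or BMAC.
 */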
444 
445 nxge_status_t
446 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
447 {
448 	npi_status_t rs = NPI_SUCCESS;
449 	hostinfo_t mac_rdc;
450 	npi_handle_t handle;
451 
452 	handle = nxgep->npi_reg_handle;
453 	mac_rdc.value = 0;
454 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
455 	mac_rdc.bits.w0.mac_pref = 1;
456 	switch (nxgep->function_num) {
457 	case 0:
458 	case 1:
459 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
460 		    nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
461 		break;
462 	case 2:
463 	case 3:
464 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
465 		    nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
466 		break;
467 	default:
468 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
469 		    "failed Assign RDC table (invalid function #)"));
470 		return (NXGE_ERROR);
471 	}
472 
473 	if (rs != NPI_SUCCESS) {
474 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
475 		    "failed Assign RDC table"));
476 		return (NXGE_ERROR | rs);
477 	}
478 	return (NXGE_OK);
479 }
480 
481 /*
482  * Initialize hostinfo registers for alternate MAC addresses and
483  * multicast MAC address.
484  */
485 nxge_status_t
486 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
487 {
488 	npi_status_t rs = NPI_SUCCESS;
489 	hostinfo_t mac_rdc;
490 	npi_handle_t handle;
491 	int i;
492 
493 	handle = nxgep->npi_reg_handle;
494 	mac_rdc.value = 0;
495 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
496 	mac_rdc.bits.w0.mac_pref = 1;
497 	switch (nxgep->function_num) {
498 	case 0:
499 	case 1:
500 		/*
501 		 * Tests indicate that it is OK not to re-initialize the
502 		 * hostinfo registers for the XMAC's alternate MAC
503 		 * addresses. But that is necessary for BMAC (case 2
504 		 * and case 3 below)
505 		 */
506 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
507 		    nxgep->function_num,
508 		    XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
509 		break;
510 	case 2:
511 	case 3:
512 		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
513 			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
514 			    nxgep->function_num, i, &mac_rdc);
515 
516 		rs |= npi_mac_hostinfo_entry(handle, OP_SET,
517 		    nxgep->function_num,
518 		    BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
519 		break;
520 	default:
521 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
522 		    "failed Assign RDC table (invalid function #)"));
523 		return (NXGE_ERROR);
524 	}
525 
526 	if (rs != NPI_SUCCESS) {
527 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
528 		    "failed Assign RDC table"));
529 		return (NXGE_ERROR | rs);
530 	}
531 	return (NXGE_OK);
532 }
533 
534 nxge_status_t
535 nxge_fflp_init_hostinfo(p_nxge_t nxgep)
536 {
537 	nxge_status_t status = NXGE_OK;
538 
539 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
540 	status |= nxge_main_mac_assign_rdc_table(nxgep);
541 	return (status);
542 }
543 
544 nxge_status_t
545 nxge_fflp_hw_reset(p_nxge_t nxgep)
546 {
547 	npi_handle_t handle;
548 	npi_status_t rs = NPI_SUCCESS;
549 	nxge_status_t status = NXGE_OK;
550 
551 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
552 
553 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
554 		status = nxge_fflp_fcram_init(nxgep);
555 		if (status != NXGE_OK) {
556 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
557 			    " failed FCRAM init. "));
558 			return (status);
559 		}
560 	}
561 
562 	status = nxge_fflp_tcam_init(nxgep);
563 	if (status != NXGE_OK) {
564 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
565 		    "failed TCAM init."));
566 		return (status);
567 	}
568 
569 	handle = nxgep->npi_reg_handle;
570 	rs = npi_fflp_cfg_llcsnap_enable(handle);
571 	if (rs != NPI_SUCCESS) {
572 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
573 		    "failed LLCSNAP enable. "));
574 		return (NXGE_ERROR | rs);
575 	}
576 
577 	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
578 	if (rs != NPI_SUCCESS) {
579 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
580 		    "failed CAM Error Check disable. "));
581 		return (NXGE_ERROR | rs);
582 	}
583 
584 	/* init the hash generators */
585 	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
586 	if (rs != NPI_SUCCESS) {
587 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
588 		    "failed H1 Poly Init. "));
589 		return (NXGE_ERROR | rs);
590 	}
591 
592 	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
593 	if (rs != NPI_SUCCESS) {
594 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
595 		    "failed H2 Poly Init. "));
596 		return (NXGE_ERROR | rs);
597 	}
598 
599 	/* invalidate TCAM entries */
600 	status = nxge_fflp_tcam_invalidate_all(nxgep);
601 	if (status != NXGE_OK) {
602 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
603 		    "failed TCAM Entry Invalidate. "));
604 		return (status);
605 	}
606 
607 	/* invalidate FCRAM entries */
608 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
609 		status = nxge_fflp_fcram_invalidate_all(nxgep);
610 		if (status != NXGE_OK) {
611 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
612 			    "failed FCRAM Entry Invalidate."));
613 			return (status);
614 		}
615 	}
616 
617 	/* invalidate VLAN RDC tables */
618 	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
619 	if (status != NXGE_OK) {
620 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
621 		    "failed VLAN Table Invalidate. "));
622 		return (status);
623 	}
624 	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
625 
626 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
627 	return (NXGE_OK);
628 }
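
/*
 * Note: nxge_fflp_hw_reset() performs the reset in this order: FCRAM
 * init (Neptune only), TCAM init, LLC/SNAP parsing enable, CAM error
 * checking disable, H1/H2 hash polynomials cleared to 0, then the
 * TCAM, FCRAM (Neptune only) and VLAN RDC tables are invalidated.
 * Finally NXGE_FFLP_HW_RESET is recorded in classifier.state.
 */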
629 
630 nxge_status_t
631 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
632 	uint32_t class_config)
633 {
634 	flow_key_cfg_t fcfg;
635 	npi_handle_t handle;
636 	npi_status_t rs = NPI_SUCCESS;
637 
638 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
639 	handle = nxgep->npi_reg_handle;
640 	bzero(&fcfg, sizeof (flow_key_cfg_t));
641 
642 	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
643 		fcfg.use_proto = 1;
644 	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
645 		fcfg.use_dport = 1;
646 	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
647 		fcfg.use_sport = 1;
648 	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
649 		fcfg.use_daddr = 1;
650 	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
651 		fcfg.use_saddr = 1;
652 	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
653 		fcfg.use_vlan = 1;
654 	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
655 		fcfg.use_l2da = 1;
656 	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
657 		fcfg.use_portnum = 1;
658 	fcfg.ip_opts_exist = 0;
659 
660 	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
661 	if (rs & NPI_FFLP_ERROR) {
662 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
663 		    " opt %x for class %d failed ", class_config, l3_class));
664 		return (NXGE_ERROR | rs);
665 	}
666 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
667 	return (NXGE_OK);
668 }
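
/*
 * Note: nxge_cfg_ip_cls_flow_key() translates the NXGE_CLASS_FLOW_USE_*
 * bits in class_config into the flow_key_cfg_t fields that select which
 * header fields feed the flow hash for one L3 class.
 *
 * Example (illustrative only): to hash TCP/IPv4 flows on the full
 * 5-tuple, a caller could pass
 *
 *	NXGE_CLASS_FLOW_USE_PROTO | NXGE_CLASS_FLOW_USE_IPSRC |
 *	NXGE_CLASS_FLOW_USE_IPDST | NXGE_CLASS_FLOW_USE_SRC_PORT |
 *	NXGE_CLASS_FLOW_USE_DST_PORT
 *
 * as class_config for TCAM_CLASS_TCP_IPV4.
 */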
669 
670 nxge_status_t
671 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
672 	uint32_t *class_config)
673 {
674 	flow_key_cfg_t fcfg;
675 	npi_handle_t handle;
676 	npi_status_t rs = NPI_SUCCESS;
677 	uint32_t ccfg = 0;
678 
679 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
680 	handle = nxgep->npi_reg_handle;
681 	bzero(&fcfg, sizeof (flow_key_cfg_t));
682 
683 	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
684 	if (rs & NPI_FFLP_ERROR) {
685 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key_get"
686 		    " for class %d failed ", l3_class));
687 		return (NXGE_ERROR | rs);
688 	}
689 
690 	if (fcfg.use_proto)
691 		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
692 	if (fcfg.use_dport)
693 		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
694 	if (fcfg.use_sport)
695 		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
696 	if (fcfg.use_daddr)
697 		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
698 	if (fcfg.use_saddr)
699 		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
700 	if (fcfg.use_vlan)
701 		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
702 	if (fcfg.use_l2da)
703 		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
704 	if (fcfg.use_portnum)
705 		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
706 
707 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
708 	    " nxge_cfg_ip_cls_flow_key_get %x", ccfg));
709 	*class_config = ccfg;
710 
711 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
712 	    " <== nxge_cfg_ip_cls_flow_key_get"));
713 	return (NXGE_OK);
714 }
715 
716 static nxge_status_t
717 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
718 	uint32_t *class_config)
719 {
720 	npi_status_t rs = NPI_SUCCESS;
721 	tcam_key_cfg_t cfg;
722 	npi_handle_t handle;
723 	uint32_t ccfg = 0;
724 
725 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class_get"));
726 
727 	bzero(&cfg, sizeof (tcam_key_cfg_t));
728 	handle = nxgep->npi_reg_handle;
729 
730 	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
731 	if (rs & NPI_FFLP_ERROR) {
732 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class_get"
733 		    " for class %d failed ", class));
734 		return (NXGE_ERROR | rs);
735 	}
736 	if (cfg.discard)
737 		ccfg |= NXGE_CLASS_DISCARD;
738 	if (cfg.lookup_enable)
739 		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
740 	if (cfg.use_ip_daddr)
741 		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
742 	*class_config = ccfg;
743 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
744 	    " <== nxge_cfg_tcam_ip_class_get %x", ccfg));
745 	return (NXGE_OK);
746 }
747 
748 static nxge_status_t
749 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
750 	uint32_t class_config)
751 {
752 	npi_status_t rs = NPI_SUCCESS;
753 	tcam_key_cfg_t cfg;
754 	npi_handle_t handle;
755 	p_nxge_class_pt_cfg_t p_class_cfgp;
756 
757 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
758 
759 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
760 	p_class_cfgp->class_cfg[class] = class_config;
761 
762 	bzero(&cfg, sizeof (tcam_key_cfg_t));
763 	handle = nxgep->npi_reg_handle;
764 	cfg.discard = 0;
765 	cfg.lookup_enable = 0;
766 	cfg.use_ip_daddr = 0;
767 	if (class_config & NXGE_CLASS_DISCARD)
768 		cfg.discard = 1;
769 	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
770 		cfg.lookup_enable = 1;
771 	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
772 		cfg.use_ip_daddr = 1;
773 
774 	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
775 	if (rs & NPI_FFLP_ERROR) {
776 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
777 		    " opt %x for class %d failed ", class_config, class));
778 		return (NXGE_ERROR | rs);
779 	}
780 	return (NXGE_OK);
781 }
782 
783 nxge_status_t
784 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
785 {
786 	npi_status_t rs = NPI_SUCCESS;
787 	npi_handle_t handle;
788 	p_nxge_class_pt_cfg_t p_class_cfgp;
789 
790 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_set_hash1"));
791 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
792 	p_class_cfgp->init_h1 = h1;
793 	handle = nxgep->npi_reg_handle;
794 	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
795 	if (rs & NPI_FFLP_ERROR) {
796 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
797 		    " nxge_fflp_set_hash1 %x failed ", h1));
798 		return (NXGE_ERROR | rs);
799 	}
800 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_set_hash1"));
801 	return (NXGE_OK);
802 }
803 
804 nxge_status_t
805 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
806 {
807 	npi_status_t rs = NPI_SUCCESS;
808 	npi_handle_t handle;
809 	p_nxge_class_pt_cfg_t p_class_cfgp;
810 
811 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_set_hash2"));
812 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
813 	p_class_cfgp->init_h2 = h2;
814 
815 	handle = nxgep->npi_reg_handle;
816 	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
817 	if (rs & NPI_FFLP_ERROR) {
818 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
819 		    " nxge_fflp_set_hash2 %x failed ", h2));
820 		return (NXGE_ERROR | rs);
821 	}
822 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_set_hash2"));
823 	return (NXGE_OK);
824 }
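
/*
 * Note: nxge_fflp_set_hash1() and nxge_fflp_set_hash2() cache the H1
 * and H2 seeds in class_config (init_h1/init_h2) and program the
 * hardware hash generators.  The same cached seeds are used by
 * nxge_flow_get_hash() to compute the H1 (20-bit) and H2 (16-bit)
 * values in software, so the two must stay in sync.
 */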
825 
826 nxge_status_t
827 nxge_classify_init_sw(p_nxge_t nxgep)
828 {
829 	nxge_classify_t *classify_ptr;
830 
831 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
832 	classify_ptr = &nxgep->classifier;
833 
834 	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
835 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
836 		    "nxge_classify_init_sw already init"));
837 		return (NXGE_OK);
838 	}
839 
840 	classify_ptr->tcam_size = nxgep->nxge_hw_p->tcam_size / nxgep->nports;
841 	classify_ptr->tcam_entries = (tcam_flow_spec_t *)nxgep->nxge_hw_p->tcam;
842 	classify_ptr->tcam_top = nxgep->function_num;
843 
844 	/* Init defaults */
845 	/*
846 	 * Add workarounds required by HW shortcomings, for example the
847 	 * code that handles fragmented packets.
848 	 */
849 	nxge_init_h1_table();
850 	nxge_crc_ccitt_init();
851 	nxgep->classifier.tcam_location = nxgep->function_num;
852 	nxgep->classifier.fragment_bug = 1;
853 	classify_ptr->state |= NXGE_FFLP_SW_INIT;
854 
855 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
856 	return (NXGE_OK);
857 }
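
/*
 * Note: nxge_classify_init_sw() gives this port its share of the TCAM
 * (total size divided by the number of ports), points tcam_entries at
 * the shadow copy kept in the shared nxge_hw_p structure, initializes
 * the H1 table and CRC-CCITT tables used for software hashing, and
 * enables the IP-fragment workaround (fragment_bug) that
 * nxge_tcam_handle_ip_fragment() relies on.
 */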
858 
859 nxge_status_t
860 nxge_classify_exit_sw(p_nxge_t nxgep)
861 {
862 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
863 	nxgep->classifier.state = 0;
864 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
865 	return (NXGE_OK);
866 }
867 
868 /*
869  * Figures out the RDC Group for the entry
870  *
871  * The current implementation is a placeholder: it returns the
872  * port's default MAC RXDMA group ID.
873  * A complete implementation would also consider the partition,
874  * etc., before deciding which group to use.
875  *
876  */
877 
878 /* ARGSUSED */
879 static uint8_t
880 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
881 {
882 	int use_port_rdc_grp = 0;
883 	uint8_t rdc_grp = 0;
884 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
885 	p_nxge_hw_pt_cfg_t p_cfgp;
886 	p_nxge_rdc_grp_t rdc_grp_p;
887 
888 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
889 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
890 	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
891 	rdc_grp = p_cfgp->def_mac_rxdma_grpid;
892 
893 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
894 	    "nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
895 	    cookie, rdc_grp, rdc_grp_p));
896 	return (rdc_grp);
897 }
898 
899 /* ARGSUSED */
900 static uint8_t
901 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
902 {
903 	return ((uint8_t)cookie);
904 }
905 
906 /* ARGSUSED */
907 static void
908 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
909 	tcam_entry_t *tcam_ptr)
910 {
911 	udpip4_spec_t *fspec_key;
912 	udpip4_spec_t *fspec_mask;
913 
914 	fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
915 	fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
916 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
917 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
918 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
919 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
920 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
921 	    fspec_key->pdst, fspec_key->psrc);
922 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
923 	    fspec_mask->pdst, fspec_mask->psrc);
924 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
925 	    tcam_ptr->ip4_class_mask,
926 	    TCAM_CLASS_UDP_IPV4);
927 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
928 	    tcam_ptr->ip4_proto_mask,
929 	    IPPROTO_UDP);
930 	tcam_ptr->ip4_tos_key = fspec_key->tos;
931 	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
932 }
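
/*
 * Note: the nxge_fill_tcam_entry_*() helpers all follow the same
 * pattern: copy the address and port keys and masks from the flow
 * spec, then pin the class and protocol fields with TCAM_IP_CLASS()
 * and TCAM_IP_PROTO().  The IPv6 variants carry only one address in
 * the key; whether it is the source or the destination address is
 * chosen by the class's NXGE_CLASS_TCAM_USE_SRC_ADDR setting.
 */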
933 
934 static void
935 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
936 	tcam_entry_t *tcam_ptr)
937 {
938 	udpip6_spec_t *fspec_key;
939 	udpip6_spec_t *fspec_mask;
940 	p_nxge_class_pt_cfg_t p_class_cfgp;
941 
942 	fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
943 	fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
944 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
945 	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
946 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
947 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
948 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
949 	} else {
950 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
951 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
952 	}
953 
954 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
955 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
956 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
957 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
958 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
959 	    fspec_key->pdst, fspec_key->psrc);
960 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
961 	    fspec_mask->pdst, fspec_mask->psrc);
962 	tcam_ptr->ip6_tos_key = fspec_key->tos;
963 	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
964 }
965 
966 /* ARGSUSED */
967 static void
968 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
969 	tcam_entry_t *tcam_ptr)
970 {
971 	tcpip4_spec_t *fspec_key;
972 	tcpip4_spec_t *fspec_mask;
973 
974 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
975 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
976 
977 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
978 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
979 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
980 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
981 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
982 	    fspec_key->pdst, fspec_key->psrc);
983 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
984 	    fspec_mask->pdst, fspec_mask->psrc);
985 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
986 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
987 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
988 	    tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
989 	tcam_ptr->ip4_tos_key = fspec_key->tos;
990 	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
991 }
992 
993 /* ARGSUSED */
994 static void
995 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
996 	tcam_entry_t *tcam_ptr)
997 {
998 	tcpip4_spec_t *fspec_key;
999 	tcpip4_spec_t *fspec_mask;
1000 
1001 	fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1002 	fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1003 
1004 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1005 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1006 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1007 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1008 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1009 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
1010 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1011 	    tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
1012 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1013 	    fspec_key->pdst, fspec_key->psrc);
1014 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1015 	    fspec_mask->pdst, fspec_mask->psrc);
1016 	tcam_ptr->ip4_tos_key = fspec_key->tos;
1017 	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1018 }
1019 
1020 static void
1021 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1022 	tcam_entry_t *tcam_ptr)
1023 {
1024 	tcpip6_spec_t *fspec_key;
1025 	tcpip6_spec_t *fspec_mask;
1026 	p_nxge_class_pt_cfg_t p_class_cfgp;
1027 
1028 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1029 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1030 
1031 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1032 	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
1033 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1034 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1035 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1036 	} else {
1037 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1038 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1039 	}
1040 
1041 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1042 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
1043 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1044 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
1045 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1046 	    fspec_key->pdst, fspec_key->psrc);
1047 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1048 	    fspec_mask->pdst, fspec_mask->psrc);
1049 	tcam_ptr->ip6_tos_key = fspec_key->tos;
1050 	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1051 }
1052 
1053 static void
1054 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1055 	tcam_entry_t *tcam_ptr)
1056 {
1057 	tcpip6_spec_t *fspec_key;
1058 	tcpip6_spec_t *fspec_mask;
1059 	p_nxge_class_pt_cfg_t p_class_cfgp;
1060 
1061 	fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1062 	fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1063 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1064 
1065 	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
1066 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1067 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1068 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1069 	} else {
1070 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1071 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1072 	}
1073 
1074 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1075 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
1076 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1077 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
1078 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1079 	    fspec_key->pdst, fspec_key->psrc);
1080 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1081 	    fspec_mask->pdst, fspec_mask->psrc);
1082 	tcam_ptr->ip6_tos_key = fspec_key->tos;
1083 	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1084 }
1085 
1086 /* ARGSUSED */
1087 static void
1088 nxge_fill_tcam_entry_ah_esp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1089 	tcam_entry_t *tcam_ptr)
1090 {
1091 	ahip4_spec_t *fspec_key;
1092 	ahip4_spec_t *fspec_mask;
1093 
1094 	fspec_key = (ahip4_spec_t *)&flow_spec->uh.ahip4spec;
1095 	fspec_mask = (ahip4_spec_t *)&flow_spec->um.ahip4spec;
1096 
1097 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1098 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1099 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1100 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1101 
1102 	tcam_ptr->ip4_port_key = fspec_key->spi;
1103 	tcam_ptr->ip4_port_mask = fspec_mask->spi;
1104 
1105 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1106 	    tcam_ptr->ip4_class_mask,
1107 	    TCAM_CLASS_AH_ESP_IPV4);
1108 
1109 	if (flow_spec->flow_type == FSPEC_AHIP4) {
1110 		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1111 		    tcam_ptr->ip4_proto_mask, IPPROTO_AH);
1112 	} else {
1113 		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1114 		    tcam_ptr->ip4_proto_mask, IPPROTO_ESP);
1115 	}
1116 	tcam_ptr->ip4_tos_key = fspec_key->tos;
1117 	tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1118 }
1119 
1120 static void
1121 nxge_fill_tcam_entry_ah_esp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1122 	tcam_entry_t *tcam_ptr)
1123 {
1124 	ahip6_spec_t *fspec_key;
1125 	ahip6_spec_t *fspec_mask;
1126 	p_nxge_class_pt_cfg_t p_class_cfgp;
1127 
1128 	fspec_key = (ahip6_spec_t *)&flow_spec->uh.ahip6spec;
1129 	fspec_mask = (ahip6_spec_t *)&flow_spec->um.ahip6spec;
1130 
1131 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1132 	if (p_class_cfgp->class_cfg[TCAM_CLASS_AH_ESP_IPV6] &
1133 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1134 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1135 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1136 	} else {
1137 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1138 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1139 	}
1140 
1141 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1142 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_AH_ESP_IPV6);
1143 
1144 	if (flow_spec->flow_type == FSPEC_AHIP6) {
1145 		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1146 		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_AH);
1147 	} else {
1148 		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1149 		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_ESP);
1150 	}
1151 	tcam_ptr->ip6_port_key = fspec_key->spi;
1152 	tcam_ptr->ip6_port_mask = fspec_mask->spi;
1153 	tcam_ptr->ip6_tos_key = fspec_key->tos;
1154 	tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1155 }
1156 
1157 /* ARGSUSED */
1158 static void
1159 nxge_fill_tcam_entry_ip_usr(p_nxge_t nxgep, flow_spec_t *flow_spec,
1160 	tcam_entry_t *tcam_ptr, tcam_class_t class)
1161 {
1162 	ip_user_spec_t *fspec_key;
1163 	ip_user_spec_t *fspec_mask;
1164 
1165 	fspec_key = (ip_user_spec_t *)&flow_spec->uh.ip_usr_spec;
1166 	fspec_mask = (ip_user_spec_t *)&flow_spec->um.ip_usr_spec;
1167 
1168 	if (fspec_key->ip_ver == FSPEC_IP4) {
1169 		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1170 		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1171 		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1172 		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1173 
1174 		tcam_ptr->ip4_port_key = fspec_key->l4_4_bytes;
1175 		tcam_ptr->ip4_port_mask = fspec_mask->l4_4_bytes;
1176 
1177 		TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1178 		    tcam_ptr->ip4_class_mask, class);
1179 
1180 		tcam_ptr->ip4_proto_key = fspec_key->proto;
1181 		tcam_ptr->ip4_proto_mask = fspec_mask->proto;
1182 
1183 		tcam_ptr->ip4_tos_key = fspec_key->tos;
1184 		tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1185 	}
1186 }
1187 
1188 
1189 nxge_status_t
1190 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
1191 	uint32_t *H1, uint16_t *H2)
1192 {
1193 	flow_spec_t *flow_spec;
1194 	uint32_t class_cfg;
1195 	flow_template_t ft;
1196 	p_nxge_class_pt_cfg_t p_class_cfgp;
1197 
1198 	int ft_size = sizeof (flow_template_t);
1199 
1200 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
1201 
1202 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1203 	bzero((char *)&ft, ft_size);
1204 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1205 
1206 	switch (flow_spec->flow_type) {
1207 	case FSPEC_TCPIP4:
1208 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
1209 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1210 			ft.ip_proto = IPPROTO_TCP;
1211 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1212 			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
1213 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1214 			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
1215 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1216 			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
1217 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1218 			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
1219 		break;
1220 
1221 	case FSPEC_UDPIP4:
1222 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
1223 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1224 			ft.ip_proto = IPPROTO_UDP;
1225 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1226 			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
1227 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1228 			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
1229 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1230 			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
1231 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1232 			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
1233 		break;
1234 
1235 	default:
1236 		return (NXGE_ERROR);
1237 	}
1238 
1239 	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
1240 	    (uint32_t *)&ft, ft_size) & 0xfffff;
1241 	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
1242 	    (uint8_t *)&ft, ft_size);
1243 
1244 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
1245 	return (NXGE_OK);
1246 }
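
/*
 * Note: nxge_flow_get_hash() builds a zeroed flow_template_t, fills in
 * only the fields selected by the class's flow-key configuration, and
 * then computes H1 (masked to 20 bits) and H2 over that template using
 * the init_h1/init_h2 seeds.  Only FSPEC_TCPIP4 and FSPEC_UDPIP4 flows
 * are handled here; any other flow type returns NXGE_ERROR.
 */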
1247 
1248 nxge_status_t
1249 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1250 {
1251 	uint32_t H1;
1252 	uint16_t H2;
1253 	nxge_status_t status = NXGE_OK;
1254 
1255 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
1256 	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
1257 	if (status != NXGE_OK) {
1258 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1259 		    " nxge_add_fcram_entry failed "));
1260 		return (status);
1261 	}
1262 
1263 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
1264 	return (NXGE_OK);
1265 }
1266 
1267 /*
1268  * The caller has already decided that this flow goes into the TCAM.
1269  */
1270 
1271 nxge_status_t
1272 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1273 {
1274 	npi_handle_t handle;
1275 	uint64_t channel_cookie;
1276 	uint64_t flow_cookie;
1277 	flow_spec_t *flow_spec;
1278 	npi_status_t rs = NPI_SUCCESS;
1279 	tcam_entry_t tcam_ptr;
1280 	tcam_location_t location;
1281 	uint8_t offset, rdc_grp;
1282 	p_nxge_hw_list_t hw_p;
1283 	uint64_t class;
1284 
1285 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
1286 	handle = nxgep->npi_reg_handle;
1287 
1288 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1289 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1290 	flow_cookie = flow_res->flow_cookie;
1291 	channel_cookie = flow_res->channel_cookie;
1292 	location = (tcam_location_t)nxge_tcam_get_index(nxgep,
1293 	    (uint16_t)flow_res->location);
1294 
1295 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1296 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1297 		    " nxge_add_tcam_entry: common hardware not set"));
1299 		return (NXGE_ERROR);
1300 	}
1301 
1302 	if (flow_spec->flow_type == FSPEC_IP_USR) {
1303 		int i;
1304 		int add_usr_cls = 0;
1305 		int ipv6 = 0;
1306 		ip_user_spec_t *uspec = &flow_spec->uh.ip_usr_spec;
1307 		ip_user_spec_t *umask = &flow_spec->um.ip_usr_spec;
1308 		nxge_usr_l3_cls_t *l3_ucls_p;
1309 
1310 		MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1311 
1312 		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
1313 			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
1314 			if (l3_ucls_p->valid && l3_ucls_p->tcam_ref_cnt) {
1315 				if (uspec->proto == l3_ucls_p->pid) {
1316 					class = l3_ucls_p->cls;
1317 					l3_ucls_p->tcam_ref_cnt++;
1318 					add_usr_cls = 1;
1319 					break;
1320 				}
1321 			} else if (l3_ucls_p->valid == 0) {
1322 				/* Program new user IP class */
1323 				switch (i) {
1324 				case 0:
1325 					class = TCAM_CLASS_IP_USER_4;
1326 					break;
1327 				case 1:
1328 					class = TCAM_CLASS_IP_USER_5;
1329 					break;
1330 				case 2:
1331 					class = TCAM_CLASS_IP_USER_6;
1332 					break;
1333 				case 3:
1334 					class = TCAM_CLASS_IP_USER_7;
1335 					break;
1336 				default:
1337 					break;
1338 				}
1339 				if (uspec->ip_ver == FSPEC_IP6)
1340 					ipv6 = 1;
1341 				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1342 				    (tcam_class_t)class, uspec->tos,
1343 				    umask->tos, uspec->proto, ipv6);
1344 				if (rs != NPI_SUCCESS)
1345 					goto fail;
1346 
1347 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1348 				    (tcam_class_t)class);
1349 				if (rs != NPI_SUCCESS)
1350 					goto fail;
1351 
1352 				l3_ucls_p->cls = class;
1353 				l3_ucls_p->pid = uspec->proto;
1354 				l3_ucls_p->tcam_ref_cnt++;
1355 				l3_ucls_p->valid = 1;
1356 				add_usr_cls = 1;
1357 				break;
1358 			} else if (l3_ucls_p->tcam_ref_cnt == 0 &&
1359 			    uspec->proto == l3_ucls_p->pid) {
1360 				/*
1361 				 * The class has already been programmed,
1362 				 * probably for flow hash
1363 				 */
1364 				class = l3_ucls_p->cls;
1365 				if (uspec->ip_ver == FSPEC_IP6)
1366 					ipv6 = 1;
1367 				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1368 				    (tcam_class_t)class, uspec->tos,
1369 				    umask->tos, uspec->proto, ipv6);
1370 				if (rs != NPI_SUCCESS)
1371 					goto fail;
1372 
1373 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1374 				    (tcam_class_t)class);
1375 				if (rs != NPI_SUCCESS)
1376 					goto fail;
1377 
1378 				l3_ucls_p->pid = uspec->proto;
1379 				l3_ucls_p->tcam_ref_cnt++;
1380 				add_usr_cls = 1;
1381 				break;
1382 			}
1383 		}
1384 		if (!add_usr_cls) {
1385 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1386 			    "nxge_add_tcam_entry: Could not find/insert class"
1387 			    " for pid %d", uspec->proto));
1388 			goto fail;
1389 		}
1390 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1391 	}
1392 
1393 	switch (flow_spec->flow_type) {
1394 	case FSPEC_TCPIP4:
1395 		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
1396 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
1397 		    flow_cookie);
1398 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
1399 		    channel_cookie);
1400 		break;
1401 
1402 	case FSPEC_UDPIP4:
1403 		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
1404 		rdc_grp = nxge_get_rdc_group(nxgep,
1405 		    TCAM_CLASS_UDP_IPV4,
1406 		    flow_cookie);
1407 		offset = nxge_get_rdc_offset(nxgep,
1408 		    TCAM_CLASS_UDP_IPV4,
1409 		    channel_cookie);
1410 		break;
1411 
1412 	case FSPEC_TCPIP6:
1413 		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
1414 		    flow_spec, &tcam_ptr);
1415 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
1416 		    flow_cookie);
1417 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
1418 		    channel_cookie);
1419 		break;
1420 
1421 	case FSPEC_UDPIP6:
1422 		nxge_fill_tcam_entry_udp_ipv6(nxgep,
1423 		    flow_spec, &tcam_ptr);
1424 		rdc_grp = nxge_get_rdc_group(nxgep,
1425 		    TCAM_CLASS_UDP_IPV6,
1426 		    flow_cookie);
1427 		offset = nxge_get_rdc_offset(nxgep,
1428 		    TCAM_CLASS_UDP_IPV6,
1429 		    channel_cookie);
1430 		break;
1431 
1432 	case FSPEC_SCTPIP4:
1433 		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
1434 		rdc_grp = nxge_get_rdc_group(nxgep,
1435 		    TCAM_CLASS_SCTP_IPV4,
1436 		    flow_cookie);
1437 		offset = nxge_get_rdc_offset(nxgep,
1438 		    TCAM_CLASS_SCTP_IPV4,
1439 		    channel_cookie);
1440 		break;
1441 
1442 	case FSPEC_SCTPIP6:
1443 		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
1444 		    flow_spec, &tcam_ptr);
1445 		rdc_grp = nxge_get_rdc_group(nxgep,
1446 		    TCAM_CLASS_SCTP_IPV6,
1447 		    flow_cookie);
1448 		offset = nxge_get_rdc_offset(nxgep,
1449 		    TCAM_CLASS_SCTP_IPV6,
1450 		    channel_cookie);
1451 		break;
1452 
1453 	case FSPEC_AHIP4:
1454 	case FSPEC_ESPIP4:
1455 		nxge_fill_tcam_entry_ah_esp(nxgep, flow_spec, &tcam_ptr);
1456 		rdc_grp = nxge_get_rdc_group(nxgep,
1457 		    TCAM_CLASS_AH_ESP_IPV4,
1458 		    flow_cookie);
1459 		offset = nxge_get_rdc_offset(nxgep,
1460 		    TCAM_CLASS_AH_ESP_IPV4,
1461 		    channel_cookie);
1462 		break;
1463 
1464 	case FSPEC_AHIP6:
1465 	case FSPEC_ESPIP6:
1466 		nxge_fill_tcam_entry_ah_esp_ipv6(nxgep,
1467 		    flow_spec, &tcam_ptr);
1468 		rdc_grp = nxge_get_rdc_group(nxgep,
1469 		    TCAM_CLASS_AH_ESP_IPV6,
1470 		    flow_cookie);
1471 		offset = nxge_get_rdc_offset(nxgep,
1472 		    TCAM_CLASS_AH_ESP_IPV6,
1473 		    channel_cookie);
1474 		break;
1475 
1476 	case FSPEC_IP_USR:
1477 		nxge_fill_tcam_entry_ip_usr(nxgep, flow_spec, &tcam_ptr,
1478 		    (tcam_class_t)class);
1479 		rdc_grp = nxge_get_rdc_group(nxgep,
1480 		    (tcam_class_t)class, flow_cookie);
1481 		offset = nxge_get_rdc_offset(nxgep,
1482 		    (tcam_class_t)class, channel_cookie);
1483 		break;
1484 	default:
1485 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1486 		    "nxge_add_tcam_entry: Unknown flow spec 0x%x",
1487 		    flow_spec->flow_type));
1488 		return (NXGE_ERROR);
1489 	}
1490 
1491 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1492 	    " nxge_add_tcam_entry write"
1493 	    " for location %d offset %d", location, offset));
1494 
1495 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1496 	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
1497 
1498 	if (rs & NPI_FFLP_ERROR) {
1499 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1500 		    " nxge_add_tcam_entry write"
1501 		    " failed for location %d", location));
1502 		goto fail;
1503 	}
1504 
1505 	tcam_ptr.match_action.value = 0;
1506 	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
1507 	tcam_ptr.match_action.bits.ldw.offset = offset;
1508 	tcam_ptr.match_action.bits.ldw.tres =
1509 	    TRES_TERM_OVRD_L2RDC;
1510 	if (channel_cookie == NXGE_PKT_DISCARD)
1511 		tcam_ptr.match_action.bits.ldw.disc = 1;
1512 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1513 	    location, tcam_ptr.match_action.value);
1514 	if (rs & NPI_FFLP_ERROR) {
1515 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1516 		    " nxge_add_tcam_entry write"
1517 		    " failed for ASC RAM location %d", location));
1518 		goto fail;
1519 	}
1520 	bcopy((void *) &tcam_ptr,
1521 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1522 	    sizeof (tcam_entry_t));
1523 	nxgep->classifier.tcam_entry_cnt++;
1524 	nxgep->classifier.tcam_entries[location].valid = 1;
1525 
1526 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1527 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
1528 	return (NXGE_OK);
1529 fail:
1530 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1531 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_tcam_entry FAILED"));
1532 	return (NXGE_ERROR);
1533 }
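
/*
 * Note: nxge_add_tcam_entry() programs an entry in two steps under
 * nxge_tcam_lock: first the key/mask via npi_fflp_tcam_entry_write(),
 * then the associated data via npi_fflp_tcam_asc_ram_entry_write(),
 * which carries the RDC group, the offset within that group, the TRES
 * override and the optional discard bit.  For FSPEC_IP_USR flows one
 * of the four programmable IP user classes is found or programmed
 * first and reference-counted in tcam_l3_prog_cls[].  A shadow copy of
 * the entry is kept in classifier.tcam_entries[].
 */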
1534 
1535 static nxge_status_t
1536 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
1537 {
1538 	tcam_entry_t tcam_ptr;
1539 	tcam_location_t location;
1540 	uint8_t class;
1541 	uint32_t class_config;
1542 	npi_handle_t handle;
1543 	npi_status_t rs = NPI_SUCCESS;
1544 	p_nxge_hw_list_t hw_p;
1545 	nxge_status_t status = NXGE_OK;
1546 
1547 	handle = nxgep->npi_reg_handle;
1548 	class = 0;
1549 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1550 	tcam_ptr.ip4_noport_key = 1;
1551 	tcam_ptr.ip4_noport_mask = 1;
1552 	location = nxgep->function_num;
1553 	nxgep->classifier.fragment_bug_location = location;
1554 
1555 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1556 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1557 		    " nxge_tcam_handle_ip_fragment: common hardware not set"));
1559 		return (NXGE_ERROR);
1560 	}
1561 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1562 	rs = npi_fflp_tcam_entry_write(handle,
1563 	    location, &tcam_ptr);
1564 
1565 	if (rs & NPI_FFLP_ERROR) {
1566 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1567 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1568 		    " nxge_tcam_handle_ip_fragment "
1569 		    " tcam_entry write"
1570 		    " failed for location %d", location));
1571 		return (NXGE_ERROR);
1572 	}
1573 	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
1574 	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
1575 	tcam_ptr.match_action.bits.ldw.tres =
1576 	    TRES_TERM_USE_OFFSET;
1577 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1578 	    location, tcam_ptr.match_action.value);
1579 
1580 	if (rs & NPI_FFLP_ERROR) {
1581 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1582 		NXGE_DEBUG_MSG((nxgep,
1583 		    FFLP_CTL,
1584 		    " nxge_tcam_handle_ip_fragment "
1585 		    " tcam_entry write"
1586 		    " failed for ASC RAM location %d", location));
1587 		return (NXGE_ERROR);
1588 	}
1589 	bcopy((void *) &tcam_ptr,
1590 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1591 	    sizeof (tcam_entry_t));
1592 	nxgep->classifier.tcam_entry_cnt++;
1593 	nxgep->classifier.tcam_entries[location].valid = 1;
1594 	for (class = TCAM_CLASS_TCP_IPV4;
1595 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1596 		class_config = nxgep->class_config.class_cfg[class];
1597 		class_config |= NXGE_CLASS_TCAM_LOOKUP;
1598 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1599 
1600 		if (status & NPI_FFLP_ERROR) {
1601 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1602 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1603 			    "nxge_tcam_handle_ip_fragment "
1604 			    "nxge_fflp_ip_class_config failed "
1605 			    " class %d config %x ", class, class_config));
1606 			return (NXGE_ERROR);
1607 		}
1608 	}
1609 
1610 	rs = npi_fflp_cfg_tcam_enable(handle);
1611 	if (rs & NPI_FFLP_ERROR) {
1612 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1613 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1614 		    "nxge_tcam_handle_ip_fragment "
1615 		    " nxge_fflp_config_tcam_enable failed"));
1616 		return (NXGE_ERROR);
1617 	}
1618 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1619 	return (NXGE_OK);
1620 }
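
/*
 * Note: nxge_tcam_handle_ip_fragment() installs the fragment
 * workaround entry at TCAM location function_num.  The entry matches
 * IPv4 packets that carry no L4 ports (i.e. fragments) and steers them
 * to the default MAC RDC group; TCAM lookup is then forced on for all
 * TCP/UDP/SCTP classes and the TCAM itself is enabled.
 */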
1621 
1622 /* ARGSUSED */
1623 static int
1624 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
1625 {
1626 	return (0);
1627 }
1628 
1629 nxge_status_t
1630 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
1631 {
1632 
1633 	int insert_hash = 0;
1634 	nxge_status_t status = NXGE_OK;
1635 
1636 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1637 		/* determine whether to do TCAM or Hash flow */
1638 		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
1639 	}
1640 	if (insert_hash) {
1641 		status = nxge_add_fcram_entry(nxgep, flow_res);
1642 	} else {
1643 		status = nxge_add_tcam_entry(nxgep, flow_res);
1644 	}
1645 	return (status);
1646 }
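
/*
 * Note: nxge_flow_need_hash_lookup() currently always returns 0, so
 * nxge_add_flow() sends every flow to the TCAM.  The FCRAM path
 * (nxge_add_fcram_entry()) only computes the hash today and is
 * effectively a placeholder.
 */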
1647 
1648 void
1649 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
1650 {
1651 	flow_resource_t *fs;
1652 
1653 	fs = (flow_resource_t *)mp->b_rptr;
1654 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1655 	    "nxge_put_tcam addr fs $%p  type %x offset %x",
1656 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
1657 	(void) nxge_add_tcam_entry(nxgep, fs);
1658 }
1659 
1660 nxge_status_t
1661 nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
1662 {
1663 	npi_handle_t handle = nxgep->npi_reg_handle;
1664 	npi_status_t rs = NPI_SUCCESS;
1665 
1666 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
1667 	rs = npi_fflp_cfg_tcam_enable(handle);
1668 	if (rs & NPI_FFLP_ERROR) {
1669 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1670 		    " nxge_fflp_config_tcam_enable failed"));
1671 		return (NXGE_ERROR | rs);
1672 	}
1673 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
1674 	return (NXGE_OK);
1675 }
1676 
1677 nxge_status_t
1678 nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
1679 {
1680 	npi_handle_t handle = nxgep->npi_reg_handle;
1681 	npi_status_t rs = NPI_SUCCESS;
1682 
1683 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1684 	    " ==> nxge_fflp_config_tcam_disable"));
1685 	rs = npi_fflp_cfg_tcam_disable(handle);
1686 	if (rs & NPI_FFLP_ERROR) {
1687 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1688 		    " nxge_fflp_config_tcam_disable failed"));
1689 		return (NXGE_ERROR | rs);
1690 	}
1691 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1692 	    " <== nxge_fflp_config_tcam_disable"));
1693 	return (NXGE_OK);
1694 }
1695 
1696 nxge_status_t
1697 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
1698 {
1699 	npi_handle_t handle = nxgep->npi_reg_handle;
1700 	npi_status_t rs = NPI_SUCCESS;
1701 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1702 	p_nxge_hw_pt_cfg_t p_cfgp;
1703 	uint8_t partition;
1704 
1705 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1706 	    " ==> nxge_fflp_config_hash_lookup_enable"));
1707 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1708 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1709 
1710 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1711 		if (p_cfgp->grpids[partition]) {
1712 			rs = npi_fflp_cfg_fcram_partition_enable(
1713 			    handle, partition);
1714 			if (rs != NPI_SUCCESS) {
1715 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1716 				    " nxge_fflp_config_hash_lookup_enable"
1717 				    " failed FCRAM partition"
1718 				    " enable for partition %d ", partition));
1719 				return (NXGE_ERROR | rs);
1720 			}
1721 		}
1722 	}
1723 
1724 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1725 	    " <== nxge_fflp_config_hash_lookup_enable"));
1726 	return (NXGE_OK);
1727 }
1728 
1729 nxge_status_t
1730 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
1731 {
1732 	npi_handle_t handle = nxgep->npi_reg_handle;
1733 	npi_status_t rs = NPI_SUCCESS;
1734 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1735 	p_nxge_hw_pt_cfg_t p_cfgp;
1736 	uint8_t partition;
1737 
1738 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1739 	    " ==> nxge_fflp_config_hash_lookup_disable"));
1740 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1741 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1742 
1743 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1744 		if (p_cfgp->grpids[partition]) {
1745 			rs = npi_fflp_cfg_fcram_partition_disable(handle,
1746 			    partition);
1747 			if (rs != NPI_SUCCESS) {
1748 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1749 				    " nxge_fflp_config_hash_lookup_disable"
1750 				    " failed FCRAM partition"
1751 				    " disable for partition %d ", partition));
1752 				return (NXGE_ERROR | rs);
1753 			}
1754 		}
1755 	}
1756 
1757 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1758 	    " <== nxge_fflp_config_hash_lookup_disable"));
1759 	return (NXGE_OK);
1760 }
1761 
1762 nxge_status_t
1763 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
1764 {
1765 	npi_handle_t handle = nxgep->npi_reg_handle;
1766 	npi_status_t rs = NPI_SUCCESS;
1767 
1768 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1769 	    " ==> nxge_fflp_config_llc_snap_enable"));
1770 	rs = npi_fflp_cfg_llcsnap_enable(handle);
1771 	if (rs & NPI_FFLP_ERROR) {
1772 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1773 		    " nxge_fflp_config_llc_snap_enable failed"));
1774 		return (NXGE_ERROR | rs);
1775 	}
1776 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1777 	    " <== nxge_fflp_config_llc_snap_enable"));
1778 	return (NXGE_OK);
1779 }
1780 
1781 nxge_status_t
1782 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
1783 {
1784 	npi_handle_t handle = nxgep->npi_reg_handle;
1785 	npi_status_t rs = NPI_SUCCESS;
1786 
1787 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1788 	    " ==> nxge_fflp_config_llc_snap_disable"));
1789 	rs = npi_fflp_cfg_llcsnap_disable(handle);
1790 	if (rs & NPI_FFLP_ERROR) {
1791 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1792 		    " nxge_fflp_config_llc_snap_disable failed"));
1793 		return (NXGE_ERROR | rs);
1794 	}
1795 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1796 	    " <== nxge_fflp_config_llc_snap_disable"));
1797 	return (NXGE_OK);
1798 }
1799 
1800 nxge_status_t
1801 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
1802 	uint32_t config)
1803 {
1804 	npi_status_t rs = NPI_SUCCESS;
1805 	npi_handle_t handle = nxgep->npi_reg_handle;
1806 	uint8_t tos, tos_mask, proto, ver = 0;
1807 	uint8_t class_enable = 0;
1808 
1809 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
1810 
1811 	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
1812 	    NXGE_CLASS_CFG_IP_TOS_SHIFT;
1813 	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
1814 	    NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
1815 	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
1816 	    NXGE_CLASS_CFG_IP_PROTO_SHIFT;
1817 	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
1818 		ver = 1;
1819 	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
1820 		class_enable = 1;
1821 	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
1822 	    proto, ver);
1823 	if (rs & NPI_FFLP_ERROR) {
1824 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1825 		    " nxge_fflp_ip_usr_class_config"
1826 		    " for class %d failed ", class));
1827 		return (NXGE_ERROR | rs);
1828 	}
1829 	if (class_enable)
1830 		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
1831 	else
1832 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
1833 
1834 	if (rs & NPI_FFLP_ERROR) {
1835 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1836 		    " nxge_fflp_ip_usr_class_config"
1837 		    " TCAM enable/disable for class %d failed ", class));
1838 		return (NXGE_ERROR | rs);
1839 	}
1840 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
1841 	return (NXGE_OK);
1842 }
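
/*
 * Illustrative sketch (not part of the driver): a caller could pack the
 * config word that nxge_fflp_ip_usr_class_config() decodes by using the
 * same shift/mask macros.  The local names (tos, tos_mask, proto, is_ipv6)
 * are assumptions for the example only.
 *
 *	uint32_t cfg = 0;
 *
 *	cfg |= (tos << NXGE_CLASS_CFG_IP_TOS_SHIFT) &
 *	    NXGE_CLASS_CFG_IP_TOS_MASK;
 *	cfg |= (tos_mask << NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT) &
 *	    NXGE_CLASS_CFG_IP_TOS_MASK_MASK;
 *	cfg |= (proto << NXGE_CLASS_CFG_IP_PROTO_SHIFT) &
 *	    NXGE_CLASS_CFG_IP_PROTO_MASK;
 *	if (is_ipv6)
 *		cfg |= NXGE_CLASS_CFG_IP_IPV6_MASK;
 *	cfg |= NXGE_CLASS_CFG_IP_ENABLE_MASK;
 *	(void) nxge_fflp_ip_usr_class_config(nxgep, TCAM_CLASS_IP_USER_4, cfg);
 */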
1843 
1844 nxge_status_t
1845 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
1846 {
1847 	uint32_t class_config;
1848 	nxge_status_t t_status = NXGE_OK;
1849 	nxge_status_t f_status = NXGE_OK;
1850 	p_nxge_class_pt_cfg_t p_class_cfgp;
1851 
1852 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1853 
1854 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1855 	class_config = p_class_cfgp->class_cfg[class];
1856 
1857 	if (class_config != config) {
1858 		p_class_cfgp->class_cfg[class] = config;
1859 		class_config = config;
1860 	}
1861 
1862 	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
1863 	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
1864 
1865 	if (t_status & NPI_FFLP_ERROR) {
1866 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1867 		    " nxge_fflp_ip_class_config %x"
1868 		    " for class %d tcam failed", config, class));
1869 		return (t_status);
1870 	}
1871 	if (f_status & NPI_FFLP_ERROR) {
1872 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1873 		    " nxge_fflp_ip_class_config %x"
1874 		    " for class %d flow key failed", config, class));
1875 		return (f_status);
1876 	}
1877 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1878 	return (NXGE_OK);
1879 }
1880 
1881 nxge_status_t
1882 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
1883 	uint32_t *config)
1884 {
1885 	uint32_t t_class_config, f_class_config;
1886 	int t_status = NXGE_OK;
1887 	int f_status = NXGE_OK;
1888 
1889 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config_get"));
1890 
1891 	t_class_config = f_class_config = 0;
1892 	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
1893 	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
1894 
1895 	if (t_status & NPI_FFLP_ERROR) {
1896 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1897 		    " nxge_fflp_ip_class_config_get  "
1898 		    " for class %d tcam failed", class));
1899 		return (t_status);
1900 	}
1901 
1902 	if (f_status & NPI_FFLP_ERROR) {
1903 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1904 		    " nxge_fflp_ip_class_config_get  "
1905 		    " for class %d flow key failed", class));
1906 		return (f_status);
1907 	}
1908 
1909 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1910 	    " nxge_fflp_ip_class_config_get tcam %x flow %x",
1911 	    t_class_config, f_class_config));
1912 
1913 	*config = t_class_config | f_class_config;
1914 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
1915 	return (NXGE_OK);
1916 }
1917 
1918 nxge_status_t
1919 nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
1920 {
1921 	uint32_t class_config;
1922 	tcam_class_t class;
1923 
1924 #ifdef	NXGE_DEBUG
1925 	int status = NXGE_OK;
1926 #endif
1927 
1928 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config_all"));
1929 	for (class = TCAM_CLASS_TCP_IPV4;
1930 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1931 		class_config = nxgep->class_config.class_cfg[class];
1932 #ifndef	NXGE_DEBUG
1933 		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
1934 #else
1935 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1936 		if (status & NPI_FFLP_ERROR) {
1937 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1938 			    "nxge_fflp_ip_class_config failed "
1939 			    " class %d config %x ",
1940 			    class, class_config));
1941 		}
1942 #endif
1943 	}
1944 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_all"));
1945 	return (NXGE_OK);
1946 }
1947 
1948 nxge_status_t
1949 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
1950 {
1951 	uint8_t port, rdc_grp;
1952 	npi_handle_t handle;
1953 	npi_status_t rs = NPI_SUCCESS;
1954 	uint8_t priority = 1;
1955 	p_nxge_mv_cfg_t vlan_table;
1956 	p_nxge_class_pt_cfg_t p_class_cfgp;
1957 	p_nxge_hw_list_t hw_p;
1958 
1959 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
1960 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1961 	handle = nxgep->npi_reg_handle;
1962 	vlan_table = p_class_cfgp->vlan_tbl;
1963 	port = nxgep->function_num;
1964 
1965 	if (vlan_table[vlan_id].flag == 0) {
1966 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1967 		    " nxge_fflp_config_vlan_table"
1968 		    " vlan id is not configured %d", vlan_id));
1969 		return (NXGE_ERROR);
1970 	}
1971 
1972 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1973 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1974 		    " nxge_fflp_config_vlan_table:"
1975 		    " common hardware not set, niu type %d", nxgep->niu_type));
1976 		return (NXGE_ERROR);
1977 	}
1978 	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
1979 	rdc_grp = vlan_table[vlan_id].rdctbl;
1980 	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
1981 	    port, vlan_id,
1982 	    rdc_grp, priority);
1983 
1984 	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
1985 	if (rs & NPI_FFLP_ERROR) {
1986 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1987 		    "nxge_fflp_config_vlan_table failed "
1988 		    " Port %d vlan_id %d rdc_grp %d",
1989 		    port, vlan_id, rdc_grp));
1990 		return (NXGE_ERROR | rs);
1991 	}
1992 
1993 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
1994 	return (NXGE_OK);
1995 }
1996 
1997 nxge_status_t
1998 nxge_fflp_update_hw(p_nxge_t nxgep)
1999 {
2000 	nxge_status_t status = NXGE_OK;
2001 	p_nxge_param_t pa;
2002 	uint64_t cfgd_vlans;
2003 	uint64_t *val_ptr;
2004 	int i;
2005 	int num_macs;
2006 	uint8_t alt_mac;
2007 	nxge_param_map_t *p_map;
2008 	p_nxge_mv_cfg_t vlan_table;
2009 	p_nxge_class_pt_cfg_t p_class_cfgp;
2010 	p_nxge_dma_pt_cfg_t p_all_cfgp;
2011 	p_nxge_hw_pt_cfg_t p_cfgp;
2012 
2013 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
2014 
2015 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
2016 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2017 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2018 
2019 	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
2020 	if (status != NXGE_OK) {
2021 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2022 		    "nxge_fflp_set_hash1 Failed"));
2023 		return (NXGE_ERROR);
2024 	}
2025 
2026 	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
2027 	if (status != NXGE_OK) {
2028 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2029 		    "nxge_fflp_set_hash2 Failed"));
2030 		return (NXGE_ERROR);
2031 	}
2032 	vlan_table = p_class_cfgp->vlan_tbl;
2033 
2034 	/* configure vlan tables */
2035 	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
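	/*
	 * pa->value carries the parameter's array as a pointer-sized value
	 * in a uint64_t; on 32-bit (__i386) kernels it is narrowed to 32
	 * bits before being cast back to a pointer (here and for the MAC
	 * parameter below).
	 */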
2036 #if defined(__i386)
2037 	val_ptr = (uint64_t *)(uint32_t)pa->value;
2038 #else
2039 	val_ptr = (uint64_t *)pa->value;
2040 #endif
2041 	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
2042 	    NXGE_PARAM_ARRAY_CNT_SHIFT);
2043 
2044 	for (i = 0; i < cfgd_vlans; i++) {
2045 		p_map = (nxge_param_map_t *)&val_ptr[i];
2046 		if (vlan_table[p_map->param_id].flag) {
2047 			status = nxge_fflp_config_vlan_table(nxgep,
2048 			    p_map->param_id);
2049 			if (status != NXGE_OK) {
2050 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2051 				    "nxge_fflp_config_vlan_table Failed"));
2052 				return (NXGE_ERROR);
2053 			}
2054 		}
2055 	}
2056 
2057 	/* config MAC addresses */
2058 	num_macs = p_cfgp->max_macs;
2059 	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
2060 #if defined(__i386)
2061 	val_ptr = (uint64_t *)(uint32_t)pa->value;
2062 #else
2063 	val_ptr = (uint64_t *)pa->value;
2064 #endif
2065 
2066 	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
2067 		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
2068 			status = nxge_logical_mac_assign_rdc_table(nxgep,
2069 			    alt_mac);
2070 			if (status != NXGE_OK) {
2071 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2072 				    "nxge_logical_mac_assign_rdc_table"
2073 				    " Failed"));
2074 				return (NXGE_ERROR);
2075 			}
2076 		}
2077 	}
2078 
2079 	/* Config Hash values */
2080 	/* config classes */
2081 	status = nxge_fflp_ip_class_config_all(nxgep);
2082 	if (status != NXGE_OK) {
2083 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2084 		    "nxge_fflp_ip_class_config_all Failed"));
2085 		return (NXGE_ERROR);
2086 	}
2087 	return (NXGE_OK);
2088 }
2089 
2090 nxge_status_t
2091 nxge_classify_init_hw(p_nxge_t nxgep)
2092 {
2093 	nxge_status_t status = NXGE_OK;
2094 
2095 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
2096 
2097 	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
2098 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2099 		    "nxge_classify_init_hw already init"));
2100 		return (NXGE_OK);
2101 	}
2102 
2103 	/* Now do a real configuration */
2104 	status = nxge_fflp_update_hw(nxgep);
2105 	if (status != NXGE_OK) {
2106 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2107 		    "nxge_fflp_update_hw failed"));
2108 		return (NXGE_ERROR);
2109 	}
2110 
2111 	/* Init RDC tables? Who should do that, rxdma or fflp? */
2112 	/* attach rdc table to the MAC port. */
2113 	status = nxge_main_mac_assign_rdc_table(nxgep);
2114 	if (status != NXGE_OK) {
2115 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2116 		    "nxge_main_mac_assign_rdc_table failed"));
2117 		return (NXGE_ERROR);
2118 	}
2119 
2120 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
2121 	if (status != NXGE_OK) {
2122 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2123 		    "nxge_alt_mcast_mac_assign_rdc_table failed"));
2124 		return (NXGE_ERROR);
2125 	}
2126 
2127 	if (nxgep->classifier.fragment_bug == 1) {
2128 		status = nxge_tcam_handle_ip_fragment(nxgep);
2129 		if (status != NXGE_OK) {
2130 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2131 			    "nxge_tcam_handle_ip_fragment failed"));
2132 			return (NXGE_ERROR);
2133 		}
2134 	}
2135 
2136 	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
2137 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
2138 	return (NXGE_OK);
2139 }
2140 
2141 nxge_status_t
2142 nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
2143 {
2144 	npi_handle_t handle;
2145 	p_nxge_fflp_stats_t statsp;
2146 	uint8_t portn, rdc_grp;
2147 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
2148 	p_nxge_hw_pt_cfg_t p_cfgp;
2149 	vlan_par_err_t vlan_err;
2150 	tcam_err_t tcam_err;
2151 	hash_lookup_err_log1_t fcram1_err;
2152 	hash_lookup_err_log2_t fcram2_err = { 0 };
2153 	hash_tbl_data_log_t fcram_err;
2154 
2155 	handle = nxgep->npi_handle;
2156 	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
2157 	portn = nxgep->mac.portnum;
2158 
2159 	/*
2160 	 * need to read the fflp error registers to figure out what the error
2161 	 * is
2162 	 */
2163 	npi_fflp_vlan_error_get(handle, &vlan_err);
2164 	npi_fflp_tcam_error_get(handle, &tcam_err);
2165 
2166 	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
2167 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2168 		    " vlan table parity error on port %d"
2169 		    " addr: 0x%x data: 0x%x",
2170 		    portn, vlan_err.bits.ldw.addr,
2171 		    vlan_err.bits.ldw.data));
2172 		statsp->vlan_parity_err++;
2173 
2174 		if (vlan_err.bits.ldw.m_err) {
2175 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2176 			    " vlan table multiple errors on port %d",
2177 			    portn));
2178 		}
2179 		statsp->errlog.vlan = (uint32_t)vlan_err.value;
2180 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2181 		    NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
2182 		npi_fflp_vlan_error_clear(handle);
2183 	}
2184 
2185 	if (tcam_err.bits.ldw.err) {
2186 		if (tcam_err.bits.ldw.p_ecc != 0) {
2187 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2188 			    " TCAM ECC error on port %d"
2189 			    " TCAM entry: 0x%x syndrome: 0x%x",
2190 			    portn, tcam_err.bits.ldw.addr,
2191 			    tcam_err.bits.ldw.syndrome));
2192 			statsp->tcam_ecc_err++;
2193 		} else {
2194 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2195 			    " TCAM Parity error on port %d"
2196 			    " addr: 0x%x parity value: 0x%x",
2197 			    portn, tcam_err.bits.ldw.addr,
2198 			    tcam_err.bits.ldw.syndrome));
2199 			statsp->tcam_parity_err++;
2200 		}
2201 
2202 		if (tcam_err.bits.ldw.mult) {
2203 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2204 			    " TCAM Multiple errors on port %d", portn));
2205 		} else {
2206 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2207 			    " TCAM PIO error on port %d", portn));
2208 		}
2209 
2210 		statsp->errlog.tcam = (uint32_t)tcam_err.value;
2211 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2212 		    NXGE_FM_EREPORT_FFLP_TCAM_ERR);
2213 		npi_fflp_tcam_error_clear(handle);
2214 	}
2215 
2216 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2217 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2218 
2219 	for (rdc_grp = 0; rdc_grp < NXGE_MAX_RDC_GROUPS; rdc_grp++) {
2220 		if (p_cfgp->grpids[rdc_grp]) {
2221 			npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
2222 			if (fcram_err.bits.ldw.pio_err) {
2223 				NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2224 				    " FCRAM PIO ECC error on port %d"
2225 				    " rdc group: %d Hash Table addr: 0x%x"
2226 				    " syndrome: 0x%x",
2227 				    portn, rdc_grp,
2228 				    fcram_err.bits.ldw.fcram_addr,
2229 				    fcram_err.bits.ldw.syndrome));
2230 				statsp->hash_pio_err[rdc_grp]++;
2231 				statsp->errlog.hash_pio[rdc_grp] =
2232 				    (uint32_t)fcram_err.value;
2233 				NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2234 				    NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
2235 				npi_fflp_fcram_error_clear(handle, rdc_grp);
2236 			}
2237 		}
2238 	}
2239 
2240 	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
2241 	if (fcram1_err.bits.ldw.ecc_err) {
2242 		char *multi_str = "";
2243 		char *multi_bit_str = "";
2244 
2245 		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
2246 		if (fcram1_err.bits.ldw.mult_lk) {
2247 			multi_str = "multiple";
2248 		}
2249 		if (fcram1_err.bits.ldw.mult_bit) {
2250 			multi_bit_str = "multiple bits";
2251 		}
2252 		statsp->hash_lookup_err++;
2253 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2254 		    " FCRAM %s lookup %s ECC error on port %d"
2255 		    " H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
2256 		    multi_str, multi_bit_str, portn,
2257 		    fcram2_err.bits.ldw.h1,
2258 		    fcram2_err.bits.ldw.subarea,
2259 		    fcram2_err.bits.ldw.syndrome));
2260 		NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2261 		    NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
2262 	}
2263 	statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
2264 	statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
2265 	return (NXGE_OK);
2266 }
2267 
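/*
 * When the IP fragment workaround is in use (fragment_bug == 1), one TCAM
 * entry is occupied by the rule installed in nxge_tcam_handle_ip_fragment()
 * and is not a user-visible classification rule, so it is excluded from
 * the count returned here.
 */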
2268 int
2269 nxge_get_valid_tcam_cnt(p_nxge_t nxgep) {
2270 	return ((nxgep->classifier.fragment_bug == 1) ?
2271 	    nxgep->classifier.tcam_entry_cnt - 1 :
2272 	    nxgep->classifier.tcam_entry_cnt);
2273 }
2274 
2275 int
2276 nxge_rxdma_channel_cnt(p_nxge_t nxgep)
2277 {
2278 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
2279 	p_nxge_hw_pt_cfg_t p_cfgp;
2280 
2281 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2282 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2283 	return (p_cfgp->max_rdcs);
2284 }
2285 
2286 /* ARGSUSED */
2287 int
2288 nxge_rxclass_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2289 {
2290 	uint32_t cmd;
2291 	rx_class_cfg_t *cfg_info = (rx_class_cfg_t *)mp->b_rptr;
2292 
2293 	if (nxgep == NULL) {
2294 		return (-1);
2295 	}
2296 	cmd = cfg_info->cmd;
2297 	switch (cmd) {
2298 	default:
2299 		return (-1);
2300 
2301 	case NXGE_RX_CLASS_GCHAN:
2302 		cfg_info->data = nxge_rxdma_channel_cnt(nxgep);
2303 		break;
2304 	case NXGE_RX_CLASS_GRULE_CNT:
2305 		MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2306 		cfg_info->rule_cnt = nxge_get_valid_tcam_cnt(nxgep);
2307 		MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2308 		break;
2309 	case NXGE_RX_CLASS_GRULE:
2310 		nxge_get_tcam_entry(nxgep, &cfg_info->fs);
2311 		break;
2312 	case NXGE_RX_CLASS_GRULE_ALL:
2313 		nxge_get_tcam_entry_all(nxgep, cfg_info);
2314 		break;
2315 	case NXGE_RX_CLASS_RULE_DEL:
2316 		nxge_del_tcam_entry(nxgep, cfg_info->fs.location);
2317 		break;
2318 	case NXGE_RX_CLASS_RULE_INS:
2319 		(void) nxge_add_tcam_entry(nxgep, &cfg_info->fs);
2320 		break;
2321 	}
2322 	return (0);
2323 }
2324 /* ARGSUSED */
2325 int
2326 nxge_rxhash_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2327 {
2328 	uint32_t cmd;
2329 	cfg_cmd_t	*cfg_info = (cfg_cmd_t *)mp->b_rptr;
2330 
2331 	if (nxgep == NULL) {
2332 		return (-1);
2333 	}
2334 	cmd = cfg_info->cmd;
2335 
2336 	switch (cmd) {
2337 	default:
2338 		return (-1);
2339 	case NXGE_IPTUN_CFG_ADD_CLS:
2340 		nxge_add_iptun_class(nxgep, &cfg_info->iptun_cfg,
2341 		    &cfg_info->class_id);
2342 		break;
2343 	case NXGE_IPTUN_CFG_SET_HASH:
2344 		nxge_cfg_iptun_hash(nxgep, &cfg_info->iptun_cfg,
2345 		    cfg_info->class_id);
2346 		break;
2347 	case NXGE_IPTUN_CFG_DEL_CLS:
2348 		nxge_del_iptun_class(nxgep, cfg_info->class_id);
2349 		break;
2350 	case NXGE_IPTUN_CFG_GET_CLS:
2351 		nxge_get_iptun_class(nxgep, &cfg_info->iptun_cfg,
2352 		    cfg_info->class_id);
2353 		break;
2354 	case NXGE_CLS_CFG_SET_SYM:
2355 		nxge_set_ip_cls_sym(nxgep, cfg_info->class_id, cfg_info->sym);
2356 		break;
2357 	case NXGE_CLS_CFG_GET_SYM:
2358 		nxge_get_ip_cls_sym(nxgep, cfg_info->class_id, &cfg_info->sym);
2359 		break;
2360 	}
2361 	return (0);
2362 }
2363 
2364 void
2365 nxge_get_tcam_entry_all(p_nxge_t nxgep, rx_class_cfg_t *cfgp)
2366 {
2367 	nxge_classify_t *clasp = &nxgep->classifier;
2368 	uint16_t	n_entries;
2369 	int		i, j, k;
2370 	tcam_flow_spec_t	*tcam_entryp;
2371 
2372 	cfgp->data = clasp->tcam_size;
2373 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2374 	n_entries = cfgp->rule_cnt;
2375 
2376 	for (i = 0, j = 0; j < cfgp->data; j++) {
2377 		k = nxge_tcam_get_index(nxgep, j);
2378 		tcam_entryp = &clasp->tcam_entries[k];
2379 		if (tcam_entryp->valid != 1)
2380 			continue;
2381 		cfgp->rule_locs[i] = j;
2382 		i++;
2383 	}
2384 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2385 
2386 	if (n_entries != i) {
2387 		/* print warning, this should not happen */
2388 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry_all"
2389 		    ": n_entries[%d] != i[%d]!!!", n_entries, i));
2390 	}
2391 }
2392 
2393 
2394 /* Entries for the ports are interleaved in the TCAM */
2395 static uint16_t
2396 nxge_tcam_get_index(p_nxge_t nxgep, uint16_t index)
2397 {
2398 	/* One entry reserved for IP fragment rule */
2399 	if (index >= (nxgep->classifier.tcam_size - 1))
2400 		index = 0;
2401 	if (nxgep->classifier.fragment_bug == 1)
2402 		index++;
2403 	return (nxgep->classifier.tcam_top + (index * nxgep->nports));
2404 }
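
/*
 * Worked example (illustrative values only): with nxgep->nports == 4,
 * tcam_top == 0 and fragment_bug set, logical rule 0 maps to TCAM
 * location tcam_top + ((0 + 1) * 4) == 4 and logical rule 1 maps to 8,
 * so successive rules owned by a port sit nports locations apart and the
 * bumped index skips the slot reserved for the fragment rule.
 */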
2405 
2406 static uint32_t
2407 nxge_tcam_cls_to_flow(uint32_t class_code) {
2408 	switch (class_code) {
2409 	case TCAM_CLASS_TCP_IPV4:
2410 		return (FSPEC_TCPIP4);
2411 	case TCAM_CLASS_UDP_IPV4:
2412 		return (FSPEC_UDPIP4);
2413 	case TCAM_CLASS_AH_ESP_IPV4:
2414 		return (FSPEC_AHIP4);
2415 	case TCAM_CLASS_SCTP_IPV4:
2416 		return (FSPEC_SCTPIP4);
2417 	case TCAM_CLASS_TCP_IPV6:
2418 		return (FSPEC_TCPIP6);
2419 	case TCAM_CLASS_UDP_IPV6:
2420 		return (FSPEC_UDPIP6);
2421 	case TCAM_CLASS_AH_ESP_IPV6:
2422 		return (FSPEC_AHIP6);
2423 	case TCAM_CLASS_SCTP_IPV6:
2424 		return (FSPEC_SCTPIP6);
2425 	case TCAM_CLASS_IP_USER_4:
2426 	case TCAM_CLASS_IP_USER_5:
2427 	case TCAM_CLASS_IP_USER_6:
2428 	case TCAM_CLASS_IP_USER_7:
2429 		return (FSPEC_IP_USR);
2430 	default:
2431 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "nxge_tcam_cls_to_flow"
2432 		    ": Unknown class code [0x%x]", class_code));
2433 		break;
2434 	}
2435 	return (0);
2436 }
2437 
2438 void
2439 nxge_get_tcam_entry(p_nxge_t nxgep, flow_resource_t *fs)
2440 {
2441 	uint16_t 	index;
2442 	tcam_flow_spec_t *tcam_ep;
2443 	tcam_entry_t	*tp;
2444 	flow_spec_t	*fspec;
2445 	tcpip4_spec_t 	*fspec_key;
2446 	tcpip4_spec_t 	*fspec_mask;
2447 
2448 	index = nxge_tcam_get_index(nxgep, (uint16_t)fs->location);
2449 	tcam_ep = &nxgep->classifier.tcam_entries[index];
2450 	if (tcam_ep->valid != 1) {
2451 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry: "
2452 		    "Entry [%d] invalid for index [%d]", fs->location, index));
2453 		return;
2454 	}
2455 
2456 	/* Fill the flow spec entry */
2457 	tp = &tcam_ep->tce;
2458 	fspec = &fs->flow_spec;
2459 	fspec->flow_type = nxge_tcam_cls_to_flow(tp->ip4_class_key);
2460 
2461 	/* TODO - look at proto field to differentiate between AH and ESP */
2462 	if (fspec->flow_type == FSPEC_AHIP4) {
2463 		if (tp->ip4_proto_key == IPPROTO_ESP)
2464 			fspec->flow_type = FSPEC_ESPIP4;
2465 	}
2466 
2467 	switch (tp->ip4_class_key) {
2468 	case TCAM_CLASS_TCP_IPV4:
2469 	case TCAM_CLASS_UDP_IPV4:
2470 	case TCAM_CLASS_AH_ESP_IPV4:
2471 	case TCAM_CLASS_SCTP_IPV4:
2472 		fspec_key = (tcpip4_spec_t *)&fspec->uh.tcpip4spec;
2473 		fspec_mask = (tcpip4_spec_t *)&fspec->um.tcpip4spec;
2474 		FSPEC_IPV4_ADDR(fspec_key->ip4dst, tp->ip4_dest_key);
2475 		FSPEC_IPV4_ADDR(fspec_mask->ip4dst, tp->ip4_dest_mask);
2476 		FSPEC_IPV4_ADDR(fspec_key->ip4src, tp->ip4_src_key);
2477 		FSPEC_IPV4_ADDR(fspec_mask->ip4src, tp->ip4_src_mask);
2478 		fspec_key->tos = tp->ip4_tos_key;
2479 		fspec_mask->tos = tp->ip4_tos_mask;
2480 		break;
2481 	default:
2482 		break;
2483 	}
2484 
2485 	switch (tp->ip4_class_key) {
2486 	case TCAM_CLASS_TCP_IPV4:
2487 	case TCAM_CLASS_UDP_IPV4:
2488 	case TCAM_CLASS_SCTP_IPV4:
2489 		FSPEC_IP_PORTS(fspec_key->pdst, fspec_key->psrc,
2490 		    tp->ip4_port_key);
2491 		FSPEC_IP_PORTS(fspec_mask->pdst, fspec_mask->psrc,
2492 		    tp->ip4_port_mask);
2493 		break;
2494 	case TCAM_CLASS_AH_ESP_IPV4:
2495 		fspec->uh.ahip4spec.spi = tp->ip4_port_key;
2496 		fspec->um.ahip4spec.spi = tp->ip4_port_mask;
2497 		break;
2498 	case TCAM_CLASS_IP_USER_4:
2499 	case TCAM_CLASS_IP_USER_5:
2500 	case TCAM_CLASS_IP_USER_6:
2501 	case TCAM_CLASS_IP_USER_7:
2502 		fspec->uh.ip_usr_spec.l4_4_bytes = tp->ip4_port_key;
2503 		fspec->um.ip_usr_spec.l4_4_bytes = tp->ip4_port_mask;
2504 		fspec->uh.ip_usr_spec.ip_ver = FSPEC_IP4;
2505 		fspec->uh.ip_usr_spec.proto = tp->ip4_proto_key;
2506 		fspec->um.ip_usr_spec.proto = tp->ip4_proto_mask;
2507 		break;
2508 	default:
2509 		break;
2510 	}
2511 
2512 	if (tp->match_action.bits.ldw.disc == 1) {
2513 		fs->channel_cookie = NXGE_PKT_DISCARD;
2514 	} else {
2515 		fs->channel_cookie = tp->match_action.bits.ldw.offset;
2516 	}
2517 }
2518 
2519 void
2520 nxge_del_tcam_entry(p_nxge_t nxgep, uint32_t location)
2521 {
2522 	npi_status_t rs = NPI_SUCCESS;
2523 	uint16_t 	index;
2524 	tcam_flow_spec_t *tcam_ep;
2525 	tcam_entry_t	*tp;
2526 	tcam_class_t	class;
2527 
2528 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2529 	index = nxge_tcam_get_index(nxgep, (uint16_t)location);
2530 	tcam_ep = &nxgep->classifier.tcam_entries[index];
2531 	if (tcam_ep->valid != 1) {
2532 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_tcam_entry: "
2533 		    "Entry [%d] invalid for index [%d]", location, index));
2534 		goto fail;
2535 	}
2536 
2537 	/* Fill the flow spec entry */
2538 	tp = &tcam_ep->tce;
2539 	class = tp->ip4_class_key;
2540 	if (class >= TCAM_CLASS_IP_USER_4 && class <= TCAM_CLASS_IP_USER_7) {
2541 		int i;
2542 		nxge_usr_l3_cls_t *l3_ucls_p;
2543 		p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2544 
2545 		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2546 			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
2547 			if (l3_ucls_p->valid) {
2548 				if (l3_ucls_p->cls == class &&
2549 				    l3_ucls_p->tcam_ref_cnt) {
2550 					l3_ucls_p->tcam_ref_cnt--;
2551 					if (l3_ucls_p->tcam_ref_cnt > 0)
2552 						continue;
2553 					/* disable class */
2554 					rs = npi_fflp_cfg_ip_usr_cls_disable(
2555 					    nxgep->npi_reg_handle,
2556 					    (tcam_class_t)class);
2557 					if (rs != NPI_SUCCESS)
2558 						goto fail;
2559 					l3_ucls_p->cls = 0;
2560 					l3_ucls_p->pid = 0;
2561 					l3_ucls_p->valid = 0;
2562 					break;
2563 				}
2564 			}
2565 		}
2566 		if (i == NXGE_L3_PROG_CLS) {
2567 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2568 			    "nxge_del_tcam_entry: Usr class "
2569 			    "0x%llx not found", (unsigned long long) class));
2570 			goto fail;
2571 		}
2572 	}
2573 
2574 	rs = npi_fflp_tcam_entry_invalidate(nxgep->npi_reg_handle, index);
2575 	if (rs != NPI_SUCCESS) {
2576 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2577 		    "nxge_del_tcam_entry: TCAM invalidate failed "
2578 		    "at loc %d ", location));
2579 		goto fail;
2580 	}
2581 
2582 	nxgep->classifier.tcam_entries[index].valid = 0;
2583 	nxgep->classifier.tcam_entry_cnt--;
2584 
2585 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2586 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_del_tcam_entry"));
2587 	return;
2588 fail:
2589 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2590 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2591 	    "<== nxge_del_tcam_entry FAILED"));
2592 }
2593 
2594 static uint8_t
2595 nxge_iptun_pkt_type_to_pid(uint8_t pkt_type)
2596 {
2597 	uint8_t pid = 0;
2598 
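	/*
	 * The pid returned here is the IANA IP protocol number the user
	 * class will match: 4 (IPv4-in-IP), 41 (IPv6), 47 (GRE) and 17
	 * (UDP, the transport carrying GTP).
	 */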
2599 	switch (pkt_type) {
2600 	case IPTUN_PKT_IPV4:
2601 		pid = 4;
2602 		break;
2603 	case IPTUN_PKT_IPV6:
2604 		pid = 41;
2605 		break;
2606 	case IPTUN_PKT_GRE:
2607 		pid = 47;
2608 		break;
2609 	case IPTUN_PKT_GTP:
2610 		pid = 17;
2611 		break;
2612 	default:
2613 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
2614 		    "nxge_iptun_pkt_type_to_pid: Unknown pkt type 0x%x",
2615 		    pkt_type));
2616 		break;
2617 	}
2618 
2619 	return (pid);
2620 }
2621 
2622 static npi_status_t
2623 nxge_set_iptun_usr_cls_reg(p_nxge_t nxgep, uint64_t class,
2624 		iptun_cfg_t *iptunp)
2625 {
2626 	npi_handle_t handle = nxgep->npi_reg_handle;
2627 	npi_status_t rs = NPI_SUCCESS;
2628 
2629 	switch (iptunp->in_pkt_type) {
2630 	case IPTUN_PKT_IPV4:
2631 	case IPTUN_PKT_IPV6:
2632 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2633 		    (tcam_class_t)class, 0, 0, 0, 0);
2634 		break;
2635 	case IPTUN_PKT_GRE:
2636 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2637 		    (tcam_class_t)class, iptunp->l4b0_val,
2638 		    iptunp->l4b0_mask, 0, 0);
2639 		break;
2640 	case IPTUN_PKT_GTP:
2641 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2642 		    (tcam_class_t)class, 0, 0, iptunp->l4b23_val,
2643 		    (iptunp->l4b23_sel & 0x01));
2644 		break;
2645 	default:
2646 		rs = NPI_FFLP_TCAM_CLASS_INVALID;
2647 		break;
2648 	}
2649 	return (rs);
2650 }
2651 
2652 void
2653 nxge_add_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp,
2654 		uint8_t *cls_idp)
2655 {
2656 	int i, add_cls;
2657 	uint8_t pid;
2658 	uint64_t class;
2659 	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2660 	npi_handle_t handle = nxgep->npi_reg_handle;
2661 	npi_status_t rs = NPI_SUCCESS;
2662 
2663 	pid = nxge_iptun_pkt_type_to_pid(iptunp->in_pkt_type);
2664 	if (pid == 0)
2665 		return;
2666 
2667 	add_cls = 0;
2668 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2669 
2670 	/* Get an user programmable class ID */
2671 	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2672 		if (hw_p->tcam_l3_prog_cls[i].valid == 0) {
2673 			/* todo add new usr class reg */
2674 			switch (i) {
2675 			case 0:
2676 				class = TCAM_CLASS_IP_USER_4;
2677 				break;
2678 			case 1:
2679 				class = TCAM_CLASS_IP_USER_5;
2680 				break;
2681 			case 2:
2682 				class = TCAM_CLASS_IP_USER_6;
2683 				break;
2684 			case 3:
2685 				class = TCAM_CLASS_IP_USER_7;
2686 				break;
2687 			default:
2688 				break;
2689 			}
2690 			rs = npi_fflp_cfg_ip_usr_cls_set(handle,
2691 			    (tcam_class_t)class, 0, 0, pid, 0);
2692 			if (rs != NPI_SUCCESS)
2693 				goto fail;
2694 
2695 			rs = nxge_set_iptun_usr_cls_reg(nxgep, class, iptunp);
2696 
2697 			if (rs != NPI_SUCCESS)
2698 				goto fail;
2699 
2700 			rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2701 			    (tcam_class_t)class);
2702 			if (rs != NPI_SUCCESS)
2703 				goto fail;
2704 
2705 			hw_p->tcam_l3_prog_cls[i].cls = class;
2706 			hw_p->tcam_l3_prog_cls[i].pid = pid;
2707 			hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2708 			    iptunp->in_pkt_type;
2709 			hw_p->tcam_l3_prog_cls[i].valid = 1;
2710 			*cls_idp = (uint8_t)class;
2711 			add_cls = 1;
2712 			break;
2713 		} else if (hw_p->tcam_l3_prog_cls[i].pid == pid) {
2714 			if (hw_p->tcam_l3_prog_cls[i].flow_pkt_type == 0) {
2715 				/* there is no flow key */
2716 				/* program the existing usr class register */
2717 				class = hw_p->tcam_l3_prog_cls[i].cls;
2718 				rs = nxge_set_iptun_usr_cls_reg(nxgep, class,
2719 				    iptunp);
2720 				if (rs != NPI_SUCCESS)
2721 					goto fail;
2722 
2723 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2724 				    (tcam_class_t)class);
2725 				if (rs != NPI_SUCCESS)
2726 					goto fail;
2727 
2728 				hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2729 				    iptunp->in_pkt_type;
2730 				*cls_idp = (uint8_t)class;
2731 				add_cls = 1;
2732 			} else {
2733 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2734 				    "nxge_add_iptun_class: L3 usr "
2735 				    "programmable class with pid %d "
2736 				    "already exists", pid));
2737 			}
2738 			break;
2739 		}
2740 	}
2741 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2742 
2743 	if (add_cls != 1) {
2744 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2745 		    "nxge_add_iptun_class: Could not add IP tunneling class"));
2746 	}
2747 	return;
2748 fail:
2749 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2750 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_iptun_class: FAILED"));
2751 }
2752 
2753 static boolean_t
2754 nxge_is_iptun_cls_present(p_nxge_t nxgep, uint8_t cls_id, int *idx)
2755 {
2756 	int i;
2757 	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2758 
2759 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2760 	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2761 		if (hw_p->tcam_l3_prog_cls[i].valid &&
2762 		    hw_p->tcam_l3_prog_cls[i].flow_pkt_type != 0) {
2763 			if (hw_p->tcam_l3_prog_cls[i].cls == cls_id)
2764 				break;
2765 		}
2766 	}
2767 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2768 
2769 	if (i == NXGE_L3_PROG_CLS) {
2770 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2771 		    "nxge_is_iptun_cls_present: Invalid class %d", cls_id));
2772 		return (B_FALSE);
2773 	} else {
2774 		*idx = i;
2775 		return (B_TRUE);
2776 	}
2777 }
2778 
2779 void
2780 nxge_cfg_iptun_hash(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2781 {
2782 	int idx;
2783 	npi_handle_t handle = nxgep->npi_reg_handle;
2784 	flow_key_cfg_t cfg;
2785 
2786 	/* check to see that this is a valid class ID */
2787 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &idx)) {
2788 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2789 		    "nxge_cfg_iptun_hash: nxge_is_iptun_cls_present "
2790 		    "failed for cls_id %d", cls_id));
2791 		return;
2792 	}
2793 
2794 	bzero((void *)&cfg, sizeof (flow_key_cfg_t));
2795 
2796 	/*
2797 	 * This ensures that all 4 bytes of the XOR value are loaded to the
2798 	 * hash key.
2799 	 */
2800 	cfg.use_dport = cfg.use_sport = cfg.ip_opts_exist = 1;
2801 
2802 	cfg.l4_xor_sel = (iptunp->l4xor_sel & FL_KEY_USR_L4XOR_MSK);
2803 	cfg.use_l4_md = 1;
2804 
2805 	if (iptunp->hash_flags & HASH_L3PROTO)
2806 		cfg.use_proto = 1;
2807 	else if (iptunp->hash_flags & HASH_IPDA)
2808 		cfg.use_daddr = 1;
2809 	else if (iptunp->hash_flags & HASH_IPSA)
2810 		cfg.use_saddr = 1;
2811 	else if (iptunp->hash_flags & HASH_VLAN)
2812 		cfg.use_vlan = 1;
2813 	else if (iptunp->hash_flags & HASH_L2DA)
2814 		cfg.use_l2da = 1;
2815 	else if (iptunp->hash_flags & HASH_IFPORT)
2816 		cfg.use_portnum = 1;
2817 
2818 	(void) npi_fflp_cfg_ip_cls_flow_key_rfnl(handle, (tcam_class_t)cls_id,
2819 	    &cfg);
2820 }
2821 
2822 void
2823 nxge_del_iptun_class(p_nxge_t nxgep, uint8_t cls_id)
2824 {
2825 	int i;
2826 	npi_handle_t handle = nxgep->npi_reg_handle;
2827 	npi_status_t rs = NPI_SUCCESS;
2828 
2829 
2830 	/* check to see that this is a valid class ID */
2831 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i)) {
2832 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2833 		    "nxge_del_iptun_class: Invalid class ID 0x%x", cls_id));
2834 		return;
2835 	}
2836 
2837 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2838 	rs = npi_fflp_cfg_ip_usr_cls_disable(handle, (tcam_class_t)cls_id);
2839 	if (rs != NPI_SUCCESS)
2840 		goto fail;
2841 	nxgep->nxge_hw_p->tcam_l3_prog_cls[i].flow_pkt_type = 0;
2842 	if (nxgep->nxge_hw_p->tcam_l3_prog_cls[i].tcam_ref_cnt == 0)
2843 		nxgep->nxge_hw_p->tcam_l3_prog_cls[i].valid = 0;
2844 
2845 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2846 	return;
2847 fail:
2848 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2849 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_iptun_class: FAILED"));
2850 }
2851 
2852 void
2853 nxge_get_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2854 {
2855 	int i;
2856 	uint8_t pid;
2857 	npi_handle_t handle = nxgep->npi_reg_handle;
2858 	npi_status_t rs = NPI_SUCCESS;
2859 	flow_key_cfg_t cfg;
2860 
2861 
2862 	/* check to see that this is a valid class ID */
2863 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i))
2864 		return;
2865 
2866 	bzero((void *)iptunp, sizeof (iptun_cfg_t));
2867 
2868 	pid = nxgep->nxge_hw_p->tcam_l3_prog_cls[i].pid;
2869 
2870 	rs = npi_fflp_cfg_ip_usr_cls_get_iptun(handle, (tcam_class_t)cls_id,
2871 	    &iptunp->l4b0_val, &iptunp->l4b0_mask, &iptunp->l4b23_val,
2872 	    &iptunp->l4b23_sel);
2873 	if (rs != NPI_SUCCESS)
2874 		goto fail;
2875 
2876 	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2877 	    (tcam_class_t)cls_id, &cfg);
2878 	if (rs != NPI_SUCCESS)
2879 		goto fail;
2880 
2881 	iptunp->l4xor_sel = cfg.l4_xor_sel;
2882 	if (cfg.use_proto)
2883 		iptunp->hash_flags |= HASH_L3PROTO;
2884 	else if (cfg.use_daddr)
2885 		iptunp->hash_flags |= HASH_IPDA;
2886 	else if (cfg.use_saddr)
2887 		iptunp->hash_flags |= HASH_IPSA;
2888 	else if (cfg.use_vlan)
2889 		iptunp->hash_flags |= HASH_VLAN;
2890 	else if (cfg.use_l2da)
2891 		iptunp->hash_flags |= HASH_L2DA;
2892 	else if (cfg.use_portnum)
2893 		iptunp->hash_flags |= HASH_IFPORT;
2894 
2895 	switch (pid) {
2896 	case 4:
2897 		iptunp->in_pkt_type = IPTUN_PKT_IPV4;
2898 		break;
2899 	case 41:
2900 		iptunp->in_pkt_type = IPTUN_PKT_IPV6;
2901 		break;
2902 	case 47:
2903 		iptunp->in_pkt_type = IPTUN_PKT_GRE;
2904 		break;
2905 	case 17:
2906 		iptunp->in_pkt_type = IPTUN_PKT_GTP;
2907 		break;
2908 	default:
2909 		iptunp->in_pkt_type = 0;
2910 		break;
2911 	}
2912 
2913 	return;
2914 fail:
2915 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_iptun_class: FAILED"));
2916 }
2917 
2918 void
2919 nxge_set_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t sym)
2920 {
2921 	npi_handle_t handle = nxgep->npi_reg_handle;
2922 	npi_status_t rs = NPI_SUCCESS;
2923 	boolean_t sym_en = (sym == 1) ? B_TRUE : B_FALSE;
2924 
2925 	rs = npi_fflp_cfg_sym_ip_cls_flow_key(handle, (tcam_class_t)cls_id,
2926 	    sym_en);
2927 	if (rs != NPI_SUCCESS)
2928 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2929 		    "nxge_set_ip_cls_sym: FAILED"));
2930 }
2931 
2932 void
2933 nxge_get_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t *sym)
2934 {
2935 	npi_handle_t handle = nxgep->npi_reg_handle;
2936 	npi_status_t rs = NPI_SUCCESS;
2937 	flow_key_cfg_t cfg;
2938 
2939 	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2940 	    (tcam_class_t)cls_id, &cfg);
2941 	if (rs != NPI_SUCCESS)
2942 		goto fail;
2943 
2944 	if (cfg.use_sym)
2945 		*sym = 1;
2946 	else
2947 		*sym = 0;
2948 	return;
2949 fail:
2950 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_ip_cls_sym: FAILED"));
2951 }
2952