1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_rxdma.h>
29 #include <sys/nxge/nxge_hio.h>
30
31 #if !defined(_BIG_ENDIAN)
32 #include <npi_rx_rd32.h>
33 #endif
34 #include <npi_rx_rd64.h>
35 #include <npi_rx_wr64.h>
36
37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
40 (rdc + nxgep->pt_config.hw_config.start_rdc)
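/*
 * NXGE_ACTUAL_RDCGRP/NXGE_ACTUAL_RDC translate a partition-relative RDC
 * group or RDC number into the absolute hardware index by adding the
 * offsets (def_mac_rxdma_grpid, start_rdc) recorded in this function's
 * partition configuration.
 */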
41
42 /*
43 * Globals: tunable parameters (/etc/system or adb)
44 *
45 */
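/*
 * These are normally left at their defaults; purely as an illustration,
 * a tunable such as nxge_max_rx_pkts could be overridden from /etc/system
 * with a line like the following (the value shown is hypothetical):
 *
 *	set nxge:nxge_max_rx_pkts = 128
 */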
46 extern uint32_t nxge_rbr_size;
47 extern uint32_t nxge_rcr_size;
48 extern uint32_t nxge_rbr_spare_size;
49 extern uint16_t nxge_rdc_buf_offset;
50
51 extern uint32_t nxge_mblks_pending;
52
53 /*
54 * Tunable to reduce the amount of time spent in the
55 * ISR doing Rx Processing.
56 */
57 extern uint32_t nxge_max_rx_pkts;
58
59 /*
60 * Tunables to manage the receive buffer blocks.
61 *
62 * nxge_rx_threshold_hi: copy all buffers.
63  * nxge_rx_buf_size_type: receive buffer block size type.
64 * nxge_rx_threshold_lo: copy only up to tunable block size type.
65 */
66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
69
70 extern uint32_t nxge_cksum_offload;
71
72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
73 static void nxge_unmap_rxdma(p_nxge_t, int);
74
75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
76
77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
78 static void nxge_rxdma_hw_stop(p_nxge_t, int);
79
80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
82 uint32_t,
83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
84 p_rx_mbox_t *);
85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
87
88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
89 uint16_t,
90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
91 p_rx_rcr_ring_t *, p_rx_mbox_t *);
92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
93 p_rx_rcr_ring_t, p_rx_mbox_t);
94
95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
96 uint16_t,
97 p_nxge_dma_common_t *,
98 p_rx_rbr_ring_t *, uint32_t);
99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
100 p_rx_rbr_ring_t);
101
102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
105
106 static mblk_t *
107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
108
109 static void nxge_receive_packet(p_nxge_t,
110 p_rx_rcr_ring_t,
111 p_rcr_entry_t,
112 boolean_t *,
113 mblk_t **, mblk_t **);
114
115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
116
117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
118 static void nxge_freeb(p_rx_msg_t);
119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120
121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 uint32_t, uint32_t);
123
124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 p_rx_rbr_ring_t);
126
127
128 static nxge_status_t
129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
130
131 nxge_status_t
132 nxge_rx_port_fatal_err_recover(p_nxge_t);
133
134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135
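/*
 * nxge_init_rxdma_channels
 *
 *	Initialize all of the RDCs that belong to this instance: for each
 *	logical group in rx_set, add every channel in that group's RDC map
 *	via nxge_grp_dc_add().  In a service domain the common RXDMA
 *	hardware is started first (nxge_rxdma_hw_start_common()).  If any
 *	channel fails to initialize, the channels added so far are removed
 *	again.
 *
 * Arguments:
 *	nxgep
 *
 * Context:
 *	Service domain or guest domain
 */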
136 nxge_status_t
137 nxge_init_rxdma_channels(p_nxge_t nxgep)
138 {
139 nxge_grp_set_t *set = &nxgep->rx_set;
140 int i, count, channel;
141 nxge_grp_t *group;
142 dc_map_t map;
143 int dev_gindex;
144
145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
146
147 if (!isLDOMguest(nxgep)) {
148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
149 cmn_err(CE_NOTE, "hw_start_common");
150 return (NXGE_ERROR);
151 }
152 }
153
154 /*
155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
156 * We only have 8 hardware RDC tables, but we may have
157 	 * up to 16 logical (software-defined) groups of RDCs,
158 * if we make use of layer 3 & 4 hardware classification.
159 */
160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
161 if ((1 << i) & set->lg.map) {
162 group = set->group[i];
163 dev_gindex =
164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
165 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
167 if ((1 << channel) & map) {
168 if ((nxge_grp_dc_add(nxgep,
169 group, VP_BOUND_RX, channel)))
170 goto init_rxdma_channels_exit;
171 }
172 }
173 }
174 if (++count == set->lg.count)
175 break;
176 }
177
178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
179 return (NXGE_OK);
180
181 init_rxdma_channels_exit:
182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
183 if ((1 << i) & set->lg.map) {
184 group = set->group[i];
185 dev_gindex =
186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
187 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
189 if ((1 << channel) & map) {
190 nxge_grp_dc_remove(nxgep,
191 VP_BOUND_RX, channel);
192 }
193 }
194 }
195 if (++count == set->lg.count)
196 break;
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
200 return (NXGE_ERROR);
201 }
202
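/*
 * nxge_init_rxdma_channel
 *
 *	Map the DMA resources for one RDC, bind the guest-domain receive
 *	interrupt when running as an LDoms guest (sun4v only), start the
 *	channel hardware, and create the per-channel kstats if they do not
 *	exist yet.  On failure the channel is unmapped again.
 *
 * Arguments:
 *	nxge
 *	channel		The channel to initialize.
 */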
203 nxge_status_t
204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
205 {
206 nxge_status_t status;
207
208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
209
210 status = nxge_map_rxdma(nxge, channel);
211 if (status != NXGE_OK) {
212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
213 "<== nxge_init_rxdma: status 0x%x", status));
214 return (status);
215 }
216
217 #if defined(sun4v)
218 if (isLDOMguest(nxge)) {
219 /* set rcr_ring */
220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
221
222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
223 if (status != NXGE_OK) {
224 nxge_unmap_rxdma(nxge, channel);
225 return (status);
226 }
227 }
228 #endif
229
230 status = nxge_rxdma_hw_start(nxge, channel);
231 if (status != NXGE_OK) {
232 nxge_unmap_rxdma(nxge, channel);
233 }
234
235 if (!nxge->statsp->rdc_ksp[channel])
236 nxge_setup_rdc_kstats(nxge, channel);
237
238 NXGE_DEBUG_MSG((nxge, MEM2_CTL,
239 "<== nxge_init_rxdma_channel: status 0x%x", status));
240
241 return (status);
242 }
243
244 void
245 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
246 {
247 nxge_grp_set_t *set = &nxgep->rx_set;
248 int rdc;
249
250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
251
252 if (set->owned.map == 0) {
253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
254 "nxge_uninit_rxdma_channels: no channels"));
255 return;
256 }
257
258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
259 if ((1 << rdc) & set->owned.map) {
260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
261 }
262 }
263
264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
265 }
266
267 void
268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
269 {
270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
271
272 if (nxgep->statsp->rdc_ksp[channel]) {
273 kstat_delete(nxgep->statsp->rdc_ksp[channel]);
274 nxgep->statsp->rdc_ksp[channel] = 0;
275 }
276
277 nxge_rxdma_hw_stop(nxgep, channel);
278 nxge_unmap_rxdma(nxgep, channel);
279
280 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
281 }
282
283 nxge_status_t
284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
285 {
286 npi_handle_t handle;
287 npi_status_t rs = NPI_SUCCESS;
288 nxge_status_t status = NXGE_OK;
289
290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
291
292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
293 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
294
295 if (rs != NPI_SUCCESS) {
296 status = NXGE_ERROR | rs;
297 }
298
299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
300
301 return (status);
302 }
303
304 void
305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
306 {
307 nxge_grp_set_t *set = &nxgep->rx_set;
308 int rdc;
309
310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
311
312 if (!isLDOMguest(nxgep)) {
313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
314 (void) npi_rxdma_dump_fzc_regs(handle);
315 }
316
317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
318 NXGE_DEBUG_MSG((nxgep, TX_CTL,
319 "nxge_rxdma_regs_dump_channels: "
320 "NULL ring pointer(s)"));
321 return;
322 }
323
324 if (set->owned.map == 0) {
325 NXGE_DEBUG_MSG((nxgep, RX_CTL,
326 "nxge_rxdma_regs_dump_channels: no channels"));
327 return;
328 }
329
330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
331 if ((1 << rdc) & set->owned.map) {
332 rx_rbr_ring_t *ring =
333 nxgep->rx_rbr_rings->rbr_rings[rdc];
334 if (ring) {
335 (void) nxge_dump_rxdma_channel(nxgep, rdc);
336 }
337 }
338 }
339
340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
341 }
342
343 nxge_status_t
344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
345 {
346 npi_handle_t handle;
347 npi_status_t rs = NPI_SUCCESS;
348 nxge_status_t status = NXGE_OK;
349
350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
351
352 handle = NXGE_DEV_NPI_HANDLE(nxgep);
353 rs = npi_rxdma_dump_rdc_regs(handle, channel);
354
355 if (rs != NPI_SUCCESS) {
356 status = NXGE_ERROR | rs;
357 }
358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
359 return (status);
360 }
361
362 nxge_status_t
363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
364 p_rx_dma_ent_msk_t mask_p)
365 {
366 npi_handle_t handle;
367 npi_status_t rs = NPI_SUCCESS;
368 nxge_status_t status = NXGE_OK;
369
370 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
371 	    "==> nxge_init_rxdma_channel_event_mask"));
372
373 handle = NXGE_DEV_NPI_HANDLE(nxgep);
374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
375 if (rs != NPI_SUCCESS) {
376 status = NXGE_ERROR | rs;
377 }
378
379 return (status);
380 }
381
382 nxge_status_t
383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
384 p_rx_dma_ctl_stat_t cs_p)
385 {
386 npi_handle_t handle;
387 npi_status_t rs = NPI_SUCCESS;
388 nxge_status_t status = NXGE_OK;
389
390 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
391 	    "==> nxge_init_rxdma_channel_cntl_stat"));
392
393 handle = NXGE_DEV_NPI_HANDLE(nxgep);
394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
395
396 if (rs != NPI_SUCCESS) {
397 status = NXGE_ERROR | rs;
398 }
399
400 return (status);
401 }
402
403 /*
404 * nxge_rxdma_cfg_rdcgrp_default_rdc
405 *
406 * Set the default RDC for an RDC Group (Table)
407 *
408 * Arguments:
409 * nxgep
410 * rdcgrp The group to modify
411 * rdc The new default RDC.
412 *
413 * Notes:
414 *
415 * NPI/NXGE function calls:
416 * npi_rxdma_cfg_rdc_table_default_rdc()
417 *
418 * Registers accessed:
419 * RDC_TBL_REG: FZC_ZCP + 0x10000
420 *
421 * Context:
422 * Service domain
423 */
424 nxge_status_t
425 nxge_rxdma_cfg_rdcgrp_default_rdc(
426 p_nxge_t nxgep,
427 uint8_t rdcgrp,
428 uint8_t rdc)
429 {
430 npi_handle_t handle;
431 npi_status_t rs = NPI_SUCCESS;
432 p_nxge_dma_pt_cfg_t p_dma_cfgp;
433 p_nxge_rdc_grp_t rdc_grp_p;
434 uint8_t actual_rdcgrp, actual_rdc;
435
436 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
439
440 handle = NXGE_DEV_NPI_HANDLE(nxgep);
441
442 /*
443 * This has to be rewritten. Do we even allow this anymore?
444 */
445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
446 RDC_MAP_IN(rdc_grp_p->map, rdc);
447 rdc_grp_p->def_rdc = rdc;
448
449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
451
452 rs = npi_rxdma_cfg_rdc_table_default_rdc(
453 handle, actual_rdcgrp, actual_rdc);
454
455 if (rs != NPI_SUCCESS) {
456 return (NXGE_ERROR | rs);
457 }
458 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
460 return (NXGE_OK);
461 }
462
463 nxge_status_t
464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
465 {
466 npi_handle_t handle;
467
468 uint8_t actual_rdc;
469 npi_status_t rs = NPI_SUCCESS;
470
471 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
472 " ==> nxge_rxdma_cfg_port_default_rdc"));
473
474 handle = NXGE_DEV_NPI_HANDLE(nxgep);
475 actual_rdc = rdc; /* XXX Hack! */
476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
477
478
479 if (rs != NPI_SUCCESS) {
480 return (NXGE_ERROR | rs);
481 }
482 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
483 " <== nxge_rxdma_cfg_port_default_rdc"));
484
485 return (NXGE_OK);
486 }
487
488 nxge_status_t
489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
490 uint16_t pkts)
491 {
492 npi_status_t rs = NPI_SUCCESS;
493 npi_handle_t handle;
494 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
495 " ==> nxge_rxdma_cfg_rcr_threshold"));
496 handle = NXGE_DEV_NPI_HANDLE(nxgep);
497
498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
499
500 if (rs != NPI_SUCCESS) {
501 return (NXGE_ERROR | rs);
502 }
503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
504 return (NXGE_OK);
505 }
506
507 nxge_status_t
508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
509 uint16_t tout, uint8_t enable)
510 {
511 npi_status_t rs = NPI_SUCCESS;
512 npi_handle_t handle;
513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
514 handle = NXGE_DEV_NPI_HANDLE(nxgep);
515 if (enable == 0) {
516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
517 } else {
518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
519 tout);
520 }
521
522 if (rs != NPI_SUCCESS) {
523 return (NXGE_ERROR | rs);
524 }
525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
526 return (NXGE_OK);
527 }
528
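/*
 * nxge_enable_rxdma_channel
 *
 *	Program one RDC with the ring configuration composed at init time
 *	(mailbox, RBR and RCR addresses/lengths, buffer sizes, RCR threshold
 *	and timeout), enable the DMA when running in a service domain, and
 *	kick the RBR with the number of pre-posted buffers.
 *
 * Arguments:
 *	nxgep
 *	channel			The channel to enable.
 *	rbr_p, rcr_p, mbox_p	Rings and mailbox for this channel.
 */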
529 nxge_status_t
530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
532 {
533 npi_handle_t handle;
534 rdc_desc_cfg_t rdc_desc;
535 p_rcrcfig_b_t cfgb_p;
536 npi_status_t rs = NPI_SUCCESS;
537
538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
539 handle = NXGE_DEV_NPI_HANDLE(nxgep);
540 /*
541 * Use configuration data composed at init time.
542 * Write to hardware the receive ring configurations.
543 */
544 rdc_desc.mbox_enable = 1;
545 rdc_desc.mbox_addr = mbox_p->mbox_addr;
546 NXGE_DEBUG_MSG((nxgep, RX_CTL,
547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
548 mbox_p->mbox_addr, rdc_desc.mbox_addr));
549
550 rdc_desc.rbr_len = rbr_p->rbb_max;
551 rdc_desc.rbr_addr = rbr_p->rbr_addr;
552
553 switch (nxgep->rx_bksize_code) {
554 case RBR_BKSIZE_4K:
555 rdc_desc.page_size = SIZE_4KB;
556 break;
557 case RBR_BKSIZE_8K:
558 rdc_desc.page_size = SIZE_8KB;
559 break;
560 case RBR_BKSIZE_16K:
561 rdc_desc.page_size = SIZE_16KB;
562 break;
563 case RBR_BKSIZE_32K:
564 rdc_desc.page_size = SIZE_32KB;
565 break;
566 }
567
568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
569 rdc_desc.valid0 = 1;
570
571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
572 rdc_desc.valid1 = 1;
573
574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
575 rdc_desc.valid2 = 1;
576
577 rdc_desc.full_hdr = rcr_p->full_hdr_flag;
578 rdc_desc.offset = rcr_p->sw_priv_hdr_len;
579
580 rdc_desc.rcr_len = rcr_p->comp_size;
581 rdc_desc.rcr_addr = rcr_p->rcr_addr;
582
583 cfgb_p = &(rcr_p->rcr_cfgb);
584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
585 /* For now, disable this timeout in a guest domain. */
586 if (isLDOMguest(nxgep)) {
587 rdc_desc.rcr_timeout = 0;
588 rdc_desc.rcr_timeout_enable = 0;
589 } else {
590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
592 }
593
594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
595 "rbr_len qlen %d pagesize code %d rcr_len %d",
596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
598 "size 0 %d size 1 %d size 2 %d",
599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
600 rbr_p->npi_pkt_buf_size2));
601
602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
604 &rdc_desc, B_TRUE);
605 else
606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
607 &rdc_desc, B_FALSE);
608 if (rs != NPI_SUCCESS) {
609 return (NXGE_ERROR | rs);
610 }
611
612 /*
613 * Enable the timeout and threshold.
614 */
615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
616 rdc_desc.rcr_threshold);
617 if (rs != NPI_SUCCESS) {
618 return (NXGE_ERROR | rs);
619 }
620
621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
622 rdc_desc.rcr_timeout);
623 if (rs != NPI_SUCCESS) {
624 return (NXGE_ERROR | rs);
625 }
626
627 if (!isLDOMguest(nxgep)) {
628 /* Enable the DMA */
629 rs = npi_rxdma_cfg_rdc_enable(handle, channel);
630 if (rs != NPI_SUCCESS) {
631 return (NXGE_ERROR | rs);
632 }
633 }
634
635 /* Kick the DMA engine. */
636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
637
638 if (!isLDOMguest(nxgep)) {
639 /* Clear the rbr empty bit */
640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
641 }
642
643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
644
645 return (NXGE_OK);
646 }
647
648 nxge_status_t
649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
650 {
651 npi_handle_t handle;
652 npi_status_t rs = NPI_SUCCESS;
653
654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
655 handle = NXGE_DEV_NPI_HANDLE(nxgep);
656
657 /* disable the DMA */
658 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
659 if (rs != NPI_SUCCESS) {
660 NXGE_DEBUG_MSG((nxgep, RX_CTL,
661 "<== nxge_disable_rxdma_channel:failed (0x%x)",
662 rs));
663 return (NXGE_ERROR | rs);
664 }
665
666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
667 return (NXGE_OK);
668 }
669
670 nxge_status_t
671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
672 {
673 npi_handle_t handle;
674 nxge_status_t status = NXGE_OK;
675
676 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
677 	    "==> nxge_rxdma_channel_rcrflush"));
678
679 handle = NXGE_DEV_NPI_HANDLE(nxgep);
680 npi_rxdma_rdc_rcr_flush(handle, channel);
681
682 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
683 	    "<== nxge_rxdma_channel_rcrflush"));
684 return (status);
685
686 }
687
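/*
 * Helpers for the binary search in nxge_rxbuf_pp_to_vp().  The search
 * compares a packet's DVMA address against a chunk and encodes the result
 * as base_side + end_side: IN_MIDDLE means the address falls inside the
 * chunk, while BOTH_RIGHT/BOTH_LEFT mean the search must continue in the
 * right or left half of the sorted chunk array.
 */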
688 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
689
690 #define TO_LEFT -1
691 #define TO_RIGHT 1
692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
693 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
695 #define NO_HINT 0xffffffff
696
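/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate the DVMA (I/O) address reported in an RCR entry back to
 *	the kernel virtual address of the receive buffer.  A per-buffer-size
 *	hint is tried first; otherwise the sorted chunk table is binary
 *	searched (see nxge_rxbuf_index_info_init()).
 *
 * Arguments:
 *	nxgep
 *	rbr_p		The RBR that owns the buffer.
 *	pktbufsz_type	Buffer size type (0, 1, 2 or RCR_SINGLE_BLOCK).
 *	pkt_buf_addr_pp	DVMA address from the completion entry.
 *	pkt_buf_addr_p	Returned kernel virtual address.
 *	bufoffset	Returned offset within the block.
 *	msg_index	Returned index into the rx_msg ring.
 */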
697 /*ARGSUSED*/
698 nxge_status_t
699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
702 {
703 int bufsize;
704 uint64_t pktbuf_pp;
705 uint64_t dvma_addr;
706 rxring_info_t *ring_info;
707 int base_side, end_side;
708 int r_index, l_index, anchor_index;
709 int found, search_done;
710 uint32_t offset, chunk_size, block_size, page_size_mask;
711 uint32_t chunk_index, block_index, total_index;
712 int max_iterations, iteration;
713 rxbuf_index_info_t *bufinfo;
714
715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
716
717 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
719 pkt_buf_addr_pp,
720 pktbufsz_type));
721 pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
722
723 switch (pktbufsz_type) {
724 case 0:
725 bufsize = rbr_p->pkt_buf_size0;
726 break;
727 case 1:
728 bufsize = rbr_p->pkt_buf_size1;
729 break;
730 case 2:
731 bufsize = rbr_p->pkt_buf_size2;
732 break;
733 case RCR_SINGLE_BLOCK:
734 bufsize = 0;
735 anchor_index = 0;
736 break;
737 default:
738 return (NXGE_ERROR);
739 }
740
741 if (rbr_p->num_blocks == 1) {
742 anchor_index = 0;
743 ring_info = rbr_p->ring_info;
744 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
745 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
746 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
747 "buf_pp $%p btype %d anchor_index %d "
748 "bufinfo $%p",
749 pkt_buf_addr_pp,
750 pktbufsz_type,
751 anchor_index,
752 bufinfo));
753
754 goto found_index;
755 }
756
757 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
758 "==> nxge_rxbuf_pp_to_vp: "
759 "buf_pp $%p btype %d anchor_index %d",
760 pkt_buf_addr_pp,
761 pktbufsz_type,
762 anchor_index));
763
764 ring_info = rbr_p->ring_info;
765 found = B_FALSE;
766 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
767 iteration = 0;
768 max_iterations = ring_info->max_iterations;
769 /*
770 * First check if this block has been seen
771 * recently. This is indicated by a hint which
772 * is initialized when the first buffer of the block
773 * is seen. The hint is reset when the last buffer of
774 * the block has been processed.
775 * As three block sizes are supported, three hints
776 * are kept. The idea behind the hints is that once
777 * the hardware uses a block for a buffer of that
778 * size, it will use it exclusively for that size
779 * and will use it until it is exhausted. It is assumed
780 	 * that a single block is in use for buffers of a given
781 	 * size at any given time.
782 */
783 if (ring_info->hint[pktbufsz_type] != NO_HINT) {
784 anchor_index = ring_info->hint[pktbufsz_type];
785 dvma_addr = bufinfo[anchor_index].dvma_addr;
786 chunk_size = bufinfo[anchor_index].buf_size;
787 if ((pktbuf_pp >= dvma_addr) &&
788 (pktbuf_pp < (dvma_addr + chunk_size))) {
789 found = B_TRUE;
790 /*
791 * check if this is the last buffer in the block
792 * If so, then reset the hint for the size;
793 */
794
795 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
796 ring_info->hint[pktbufsz_type] = NO_HINT;
797 }
798 }
799
800 if (found == B_FALSE) {
801 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
802 "==> nxge_rxbuf_pp_to_vp: (!found)"
803 "buf_pp $%p btype %d anchor_index %d",
804 pkt_buf_addr_pp,
805 pktbufsz_type,
806 anchor_index));
807
808 /*
809 * This is the first buffer of the block of this
810 * size. Need to search the whole information
811 * array.
812 * the search algorithm uses a binary tree search
813 * algorithm. It assumes that the information is
814 * already sorted with increasing order
815 * info[0] < info[1] < info[2] .... < info[n-1]
816 * where n is the size of the information array
817 */
818 r_index = rbr_p->num_blocks - 1;
819 l_index = 0;
820 search_done = B_FALSE;
821 anchor_index = MID_INDEX(r_index, l_index);
822 while (search_done == B_FALSE) {
823 if ((r_index == l_index) ||
824 (iteration >= max_iterations))
825 search_done = B_TRUE;
826 end_side = TO_RIGHT; /* to the right */
827 base_side = TO_LEFT; /* to the left */
828 /* read the DVMA address information and sort it */
829 dvma_addr = bufinfo[anchor_index].dvma_addr;
830 chunk_size = bufinfo[anchor_index].buf_size;
831 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
832 "==> nxge_rxbuf_pp_to_vp: (searching)"
833 "buf_pp $%p btype %d "
834 "anchor_index %d chunk_size %d dvmaaddr $%p",
835 pkt_buf_addr_pp,
836 pktbufsz_type,
837 anchor_index,
838 chunk_size,
839 dvma_addr));
840
841 if (pktbuf_pp >= dvma_addr)
842 base_side = TO_RIGHT; /* to the right */
843 if (pktbuf_pp < (dvma_addr + chunk_size))
844 end_side = TO_LEFT; /* to the left */
845
846 switch (base_side + end_side) {
847 case IN_MIDDLE:
848 /* found */
849 found = B_TRUE;
850 search_done = B_TRUE;
851 if ((pktbuf_pp + bufsize) <
852 (dvma_addr + chunk_size))
853 ring_info->hint[pktbufsz_type] =
854 bufinfo[anchor_index].buf_index;
855 break;
856 case BOTH_RIGHT:
857 /* not found: go to the right */
858 l_index = anchor_index + 1;
859 anchor_index = MID_INDEX(r_index, l_index);
860 break;
861
862 case BOTH_LEFT:
863 /* not found: go to the left */
864 r_index = anchor_index - 1;
865 anchor_index = MID_INDEX(r_index, l_index);
866 break;
867 default: /* should not come here */
868 return (NXGE_ERROR);
869 }
870 iteration++;
871 }
872
873 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
874 "==> nxge_rxbuf_pp_to_vp: (search done)"
875 "buf_pp $%p btype %d anchor_index %d",
876 pkt_buf_addr_pp,
877 pktbufsz_type,
878 anchor_index));
879 }
880
881 if (found == B_FALSE) {
882 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
883 "==> nxge_rxbuf_pp_to_vp: (search failed)"
884 "buf_pp $%p btype %d anchor_index %d",
885 pkt_buf_addr_pp,
886 pktbufsz_type,
887 anchor_index));
888 return (NXGE_ERROR);
889 }
890
891 found_index:
892 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
893 "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
894 "buf_pp $%p btype %d bufsize %d anchor_index %d",
895 pkt_buf_addr_pp,
896 pktbufsz_type,
897 bufsize,
898 anchor_index));
899
900 /* index of the first block in this chunk */
901 chunk_index = bufinfo[anchor_index].start_index;
902 dvma_addr = bufinfo[anchor_index].dvma_addr;
903 page_size_mask = ring_info->block_size_mask;
904
905 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
906 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
907 "buf_pp $%p btype %d bufsize %d "
908 "anchor_index %d chunk_index %d dvma $%p",
909 pkt_buf_addr_pp,
910 pktbufsz_type,
911 bufsize,
912 anchor_index,
913 chunk_index,
914 dvma_addr));
915
916 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
917 block_size = rbr_p->block_size; /* System block(page) size */
918
919 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
920 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
921 "buf_pp $%p btype %d bufsize %d "
922 "anchor_index %d chunk_index %d dvma $%p "
923 "offset %d block_size %d",
924 pkt_buf_addr_pp,
925 pktbufsz_type,
926 bufsize,
927 anchor_index,
928 chunk_index,
929 dvma_addr,
930 offset,
931 block_size));
932
933 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
934
935 block_index = (offset / block_size); /* index within chunk */
936 total_index = chunk_index + block_index;
937
938
939 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
940 "==> nxge_rxbuf_pp_to_vp: "
941 "total_index %d dvma_addr $%p "
942 "offset %d block_size %d "
943 "block_index %d ",
944 total_index, dvma_addr,
945 offset, block_size,
946 block_index));
947 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
948 (uint64_t)offset);
949
950 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
951 "==> nxge_rxbuf_pp_to_vp: "
952 "total_index %d dvma_addr $%p "
953 "offset %d block_size %d "
954 "block_index %d "
955 "*pkt_buf_addr_p $%p",
956 total_index, dvma_addr,
957 offset, block_size,
958 block_index,
959 *pkt_buf_addr_p));
960
961
962 *msg_index = total_index;
963 *bufoffset = (offset & page_size_mask);
964
965 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
966 "==> nxge_rxbuf_pp_to_vp: get msg index: "
967 "msg_index %d bufoffset_index %d",
968 *msg_index,
969 *bufoffset));
970
971 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
972
973 return (NXGE_OK);
974 }
975
976 /*
977 * used by quick sort (qsort) function
978 * to perform comparison
979 */
980 static int
981 nxge_sort_compare(const void *p1, const void *p2)
982 {
983
984 rxbuf_index_info_t *a, *b;
985
986 a = (rxbuf_index_info_t *)p1;
987 b = (rxbuf_index_info_t *)p2;
988
989 if (a->dvma_addr > b->dvma_addr)
990 return (1);
991 if (a->dvma_addr < b->dvma_addr)
992 return (-1);
993 return (0);
994 }
995
996
997
998 /*
999 * grabbed this sort implementation from common/syscall/avl.c
1000 *
1001 */
1002 /*
1003 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
1004 * v = Ptr to array/vector of objs
1005 * n = # objs in the array
1006  * s = size of each obj (must be a multiple of the word size)
1007  * f = ptr to function to compare two objs, which
1008  *     returns (-1 = less than, 0 = equal, 1 = greater than)
1009 */
1010 void
1011 nxge_ksort(caddr_t v, int n, int s, int (*f)())
1012 {
1013 int g, i, j, ii;
1014 unsigned int *p1, *p2;
1015 unsigned int tmp;
1016
1017 /* No work to do */
1018 if (v == NULL || n <= 1)
1019 return;
1020 /* Sanity check on arguments */
1021 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
1022 ASSERT(s > 0);
1023
1024 for (g = n / 2; g > 0; g /= 2) {
1025 for (i = g; i < n; i++) {
1026 for (j = i - g; j >= 0 &&
1027 (*f)(v + j * s, v + (j + g) * s) == 1;
1028 j -= g) {
1029 p1 = (unsigned *)(v + j * s);
1030 p2 = (unsigned *)(v + (j + g) * s);
1031 for (ii = 0; ii < s / 4; ii++) {
1032 tmp = *p1;
1033 *p1++ = *p2;
1034 *p2++ = tmp;
1035 }
1036 }
1037 }
1038 }
1039 }
1040
1041 /*
1042 * Initialize data structures required for rxdma
1043 * buffer dvma->vmem address lookup
1044 */
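/*
 * Note: max_iterations is set to the smallest i such that 2^i exceeds
 * the number of chunks, plus one, which bounds the number of steps the
 * binary search in nxge_rxbuf_pp_to_vp() may take.
 */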
1045 /*ARGSUSED*/
1046 static nxge_status_t
1047 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
1048 {
1049
1050 int index;
1051 rxring_info_t *ring_info;
1052 int max_iteration = 0, max_index = 0;
1053
1054 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
1055
1056 ring_info = rbrp->ring_info;
1057 ring_info->hint[0] = NO_HINT;
1058 ring_info->hint[1] = NO_HINT;
1059 ring_info->hint[2] = NO_HINT;
1060 max_index = rbrp->num_blocks;
1061
1062 /* read the DVMA address information and sort it */
1063 /* do init of the information array */
1064
1065
1066 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1067 " nxge_rxbuf_index_info_init Sort ptrs"));
1068
1069 /* sort the array */
1070 nxge_ksort((void *)ring_info->buffer, max_index,
1071 sizeof (rxbuf_index_info_t), nxge_sort_compare);
1072
1073
1074
1075 for (index = 0; index < max_index; index++) {
1076 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1077 " nxge_rxbuf_index_info_init: sorted chunk %d "
1078 " ioaddr $%p kaddr $%p size %x",
1079 index, ring_info->buffer[index].dvma_addr,
1080 ring_info->buffer[index].kaddr,
1081 ring_info->buffer[index].buf_size));
1082 }
1083
1084 max_iteration = 0;
1085 while (max_index >= (1ULL << max_iteration))
1086 max_iteration++;
1087 ring_info->max_iterations = max_iteration + 1;
1088 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
1089 " nxge_rxbuf_index_info_init Find max iter %d",
1090 ring_info->max_iterations));
1091
1092 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
1093 return (NXGE_OK);
1094 }
1095
1096 /* ARGSUSED */
1097 void
1098 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
1099 {
1100 #ifdef NXGE_DEBUG
1101
1102 uint32_t bptr;
1103 uint64_t pp;
1104
1105 bptr = entry_p->bits.hdw.pkt_buf_addr;
1106
1107 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1108 "\trcr entry $%p "
1109 "\trcr entry 0x%0llx "
1110 "\trcr entry 0x%08x "
1111 "\trcr entry 0x%08x "
1112 "\tvalue 0x%0llx\n"
1113 "\tmulti = %d\n"
1114 "\tpkt_type = 0x%x\n"
1115 "\tzero_copy = %d\n"
1116 "\tnoport = %d\n"
1117 "\tpromis = %d\n"
1118 "\terror = 0x%04x\n"
1119 "\tdcf_err = 0x%01x\n"
1120 "\tl2_len = %d\n"
1121 "\tpktbufsize = %d\n"
1122 "\tpkt_buf_addr = $%p\n"
1123 "\tpkt_buf_addr (<< 6) = $%p\n",
1124 entry_p,
1125 *(int64_t *)entry_p,
1126 *(int32_t *)entry_p,
1127 *(int32_t *)((char *)entry_p + 32),
1128 entry_p->value,
1129 entry_p->bits.hdw.multi,
1130 entry_p->bits.hdw.pkt_type,
1131 entry_p->bits.hdw.zero_copy,
1132 entry_p->bits.hdw.noport,
1133 entry_p->bits.hdw.promis,
1134 entry_p->bits.hdw.error,
1135 entry_p->bits.hdw.dcf_err,
1136 entry_p->bits.hdw.l2_len,
1137 entry_p->bits.hdw.pktbufsz,
1138 bptr,
1139 entry_p->bits.ldw.pkt_buf_addr));
1140
1141 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
1142 RCR_PKT_BUF_ADDR_SHIFT;
1143
1144 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
1145 pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
1146 #endif
1147 }
1148
1149 void
1150 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
1151 {
1152 npi_handle_t handle;
1153 rbr_stat_t rbr_stat;
1154 addr44_t hd_addr;
1155 addr44_t tail_addr;
1156 uint16_t qlen;
1157
1158 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1159 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));
1160
1161 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1162
1163 /* RBR head */
1164 hd_addr.addr = 0;
1165 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
1166 printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
1167 (void *)hd_addr.addr);
1168
1169 /* RBR stats */
1170 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
1171 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);
1172
1173 /* RCR tail */
1174 tail_addr.addr = 0;
1175 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
1176 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
1177 (void *)tail_addr.addr);
1178
1179 /* RCR qlen */
1180 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1181 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1182
1183 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1184 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1185 }
1186
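/*
 * nxge_rxdma_hw_mode
 *
 *	Enable or disable every RDC owned by this instance.  The hardware
 *	must already have been initialized (STATE_HW_INITIALIZED) and the
 *	ring pointers set up.
 *
 * Arguments:
 *	nxgep
 *	enable		B_TRUE to enable, B_FALSE to disable.
 */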
1187 nxge_status_t
1188 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1189 {
1190 nxge_grp_set_t *set = &nxgep->rx_set;
1191 nxge_status_t status;
1192 npi_status_t rs;
1193 int rdc;
1194
1195 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1196 "==> nxge_rxdma_hw_mode: mode %d", enable));
1197
1198 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1199 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1200 "<== nxge_rxdma_mode: not initialized"));
1201 return (NXGE_ERROR);
1202 }
1203
1204 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1205 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1206 		    "<== nxge_rxdma_hw_mode: "
1207 "NULL ring pointer(s)"));
1208 return (NXGE_ERROR);
1209 }
1210
1211 if (set->owned.map == 0) {
1212 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1213 		    "nxge_rxdma_hw_mode: no channels"));
1214 return (0);
1215 }
1216
1217 rs = 0;
1218 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1219 if ((1 << rdc) & set->owned.map) {
1220 rx_rbr_ring_t *ring =
1221 nxgep->rx_rbr_rings->rbr_rings[rdc];
1222 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
1223 if (ring) {
1224 if (enable) {
1225 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1226 "==> nxge_rxdma_hw_mode: "
1227 "channel %d (enable)", rdc));
1228 rs = npi_rxdma_cfg_rdc_enable
1229 (handle, rdc);
1230 } else {
1231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1232 "==> nxge_rxdma_hw_mode: "
1233 					    "channel %d (disable)", rdc));
1234 rs = npi_rxdma_cfg_rdc_disable
1235 (handle, rdc);
1236 }
1237 }
1238 }
1239 }
1240
1241 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1242
1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1244 "<== nxge_rxdma_hw_mode: status 0x%x", status));
1245
1246 return (status);
1247 }
1248
1249 void
1250 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1251 {
1252 npi_handle_t handle;
1253
1254 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1255 "==> nxge_rxdma_enable_channel: channel %d", channel));
1256
1257 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1258 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
1259
1260 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
1261 }
1262
1263 void
1264 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1265 {
1266 npi_handle_t handle;
1267
1268 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1269 "==> nxge_rxdma_disable_channel: channel %d", channel));
1270
1271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1272 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
1273
1274 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
1275 }
1276
1277 void
1278 nxge_hw_start_rx(p_nxge_t nxgep)
1279 {
1280 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));
1281
1282 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
1283 (void) nxge_rx_mac_enable(nxgep);
1284
1285 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
1286 }
1287
1288 /*ARGSUSED*/
1289 void
1290 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
1291 {
1292 nxge_grp_set_t *set = &nxgep->rx_set;
1293 int rdc;
1294
1295 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
1296
1297 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1298 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1299 		    "<== nxge_fixup_rxdma_rings: "
1300 "NULL ring pointer(s)"));
1301 return;
1302 }
1303
1304 if (set->owned.map == 0) {
1305 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1306 		    "nxge_fixup_rxdma_rings: no channels"));
1307 return;
1308 }
1309
1310 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1311 if ((1 << rdc) & set->owned.map) {
1312 rx_rbr_ring_t *ring =
1313 nxgep->rx_rbr_rings->rbr_rings[rdc];
1314 if (ring) {
1315 nxge_rxdma_hw_stop(nxgep, rdc);
1316 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1317 "==> nxge_fixup_rxdma_rings: "
1318 				    "channel %d ring $%p",
1319 rdc, ring));
1320 (void) nxge_rxdma_fix_channel(nxgep, rdc);
1321 }
1322 }
1323 }
1324
1325 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
1326 }
1327
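/*
 * nxge_rxdma_fix_channel
 *
 *	Re-initialize one channel in place: stop it, reset the RBR/RCR
 *	software indices, zero the RCR descriptor area and restart the
 *	channel with the existing rings and mailbox.
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to fix.
 */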
1328 void
1329 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1330 {
1331 int ndmas;
1332 p_rx_rbr_rings_t rx_rbr_rings;
1333 p_rx_rbr_ring_t *rbr_rings;
1334 p_rx_rcr_rings_t rx_rcr_rings;
1335 p_rx_rcr_ring_t *rcr_rings;
1336 p_rx_mbox_areas_t rx_mbox_areas_p;
1337 p_rx_mbox_t *rx_mbox_p;
1338 p_nxge_dma_pool_t dma_buf_poolp;
1339 p_nxge_dma_pool_t dma_cntl_poolp;
1340 p_rx_rbr_ring_t rbrp;
1341 p_rx_rcr_ring_t rcrp;
1342 p_rx_mbox_t mboxp;
1343 p_nxge_dma_common_t dmap;
1344 nxge_status_t status = NXGE_OK;
1345
1346 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
1347
1348 (void) nxge_rxdma_stop_channel(nxgep, channel);
1349
1350 dma_buf_poolp = nxgep->rx_buf_pool_p;
1351 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
1352
1353 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1354 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1355 "<== nxge_rxdma_fix_channel: buf not allocated"));
1356 return;
1357 }
1358
1359 ndmas = dma_buf_poolp->ndmas;
1360 if (!ndmas) {
1361 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1362 "<== nxge_rxdma_fix_channel: no dma allocated"));
1363 return;
1364 }
1365
1366 rx_rbr_rings = nxgep->rx_rbr_rings;
1367 rx_rcr_rings = nxgep->rx_rcr_rings;
1368 rbr_rings = rx_rbr_rings->rbr_rings;
1369 rcr_rings = rx_rcr_rings->rcr_rings;
1370 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
1371 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
1372
1373 /* Reinitialize the receive block and completion rings */
1374 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel],
1375 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel],
1376 mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
1377
1378 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
1379 rbrp->rbr_rd_index = 0;
1380 rcrp->comp_rd_index = 0;
1381 rcrp->comp_wt_index = 0;
1382
1383 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
1384 bzero((caddr_t)dmap->kaddrp, dmap->alength);
1385
1386 status = nxge_rxdma_start_channel(nxgep, channel,
1387 rbrp, rcrp, mboxp);
1388 if (status != NXGE_OK) {
1389 goto nxge_rxdma_fix_channel_fail;
1390 }
1391
1392 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1393 "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
1394 return;
1395
1396 nxge_rxdma_fix_channel_fail:
1397 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1398 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
1399 }
1400
1401 p_rx_rbr_ring_t
1402 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1403 {
1404 nxge_grp_set_t *set = &nxgep->rx_set;
1405 nxge_channel_t rdc;
1406
1407 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1408 "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
1409
1410 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
1411 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1412 "<== nxge_rxdma_get_rbr_ring: "
1413 "NULL ring pointer(s)"));
1414 return (NULL);
1415 }
1416
1417 if (set->owned.map == 0) {
1418 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419 "<== nxge_rxdma_get_rbr_ring: no channels"));
1420 return (NULL);
1421 }
1422
1423 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1424 if ((1 << rdc) & set->owned.map) {
1425 rx_rbr_ring_t *ring =
1426 nxgep->rx_rbr_rings->rbr_rings[rdc];
1427 if (ring) {
1428 if (channel == ring->rdc) {
1429 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1430 "==> nxge_rxdma_get_rbr_ring: "
1431 "channel %d ring $%p", rdc, ring));
1432 return (ring);
1433 }
1434 }
1435 }
1436 }
1437
1438 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1439 "<== nxge_rxdma_get_rbr_ring: not found"));
1440
1441 return (NULL);
1442 }
1443
1444 p_rx_rcr_ring_t
1445 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
1446 {
1447 nxge_grp_set_t *set = &nxgep->rx_set;
1448 nxge_channel_t rdc;
1449
1450 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1451 "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
1452
1453 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
1454 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1455 "<== nxge_rxdma_get_rcr_ring: "
1456 "NULL ring pointer(s)"));
1457 return (NULL);
1458 }
1459
1460 if (set->owned.map == 0) {
1461 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1462 		    "<== nxge_rxdma_get_rcr_ring: no channels"));
1463 return (NULL);
1464 }
1465
1466 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
1467 if ((1 << rdc) & set->owned.map) {
1468 rx_rcr_ring_t *ring =
1469 nxgep->rx_rcr_rings->rcr_rings[rdc];
1470 if (ring) {
1471 if (channel == ring->rdc) {
1472 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1473 "==> nxge_rxdma_get_rcr_ring: "
1474 "channel %d ring $%p", rdc, ring));
1475 return (ring);
1476 }
1477 }
1478 }
1479 }
1480
1481 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1482 "<== nxge_rxdma_get_rcr_ring: not found"));
1483
1484 return (NULL);
1485 }
1486
1487 /*
1488 * Static functions start here.
1489 */
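/*
 * nxge_allocb
 *
 *	Allocate an rx_msg_t for one receive buffer.  When dmabuf_p is
 *	non-NULL the buffer is carved out of that pre-allocated DMA chunk
 *	(and the chunk's kaddr/ioaddr/cookie are advanced past it);
 *	otherwise the buffer comes from kmem.  The buffer is wrapped with
 *	desballoc() so that nxge_freeb() runs when the mblk is freed.
 */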
1490 static p_rx_msg_t
1491 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
1492 {
1493 p_rx_msg_t nxge_mp = NULL;
1494 p_nxge_dma_common_t dmamsg_p;
1495 uchar_t *buffer;
1496
1497 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
1498 if (nxge_mp == NULL) {
1499 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1500 "Allocation of a rx msg failed."));
1501 goto nxge_allocb_exit;
1502 }
1503
1504 nxge_mp->use_buf_pool = B_FALSE;
1505 if (dmabuf_p) {
1506 nxge_mp->use_buf_pool = B_TRUE;
1507 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
1508 *dmamsg_p = *dmabuf_p;
1509 dmamsg_p->nblocks = 1;
1510 dmamsg_p->block_size = size;
1511 dmamsg_p->alength = size;
1512 buffer = (uchar_t *)dmabuf_p->kaddrp;
1513
1514 dmabuf_p->kaddrp = (void *)
1515 ((char *)dmabuf_p->kaddrp + size);
1516 dmabuf_p->ioaddr_pp = (void *)
1517 ((char *)dmabuf_p->ioaddr_pp + size);
1518 dmabuf_p->alength -= size;
1519 dmabuf_p->offset += size;
1520 dmabuf_p->dma_cookie.dmac_laddress += size;
1521 dmabuf_p->dma_cookie.dmac_size -= size;
1522
1523 } else {
1524 buffer = KMEM_ALLOC(size, KM_NOSLEEP);
1525 if (buffer == NULL) {
1526 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
1527 "Allocation of a receive page failed."));
1528 goto nxge_allocb_fail1;
1529 }
1530 }
1531
1532 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
1533 if (nxge_mp->rx_mblk_p == NULL) {
1534 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
1535 goto nxge_allocb_fail2;
1536 }
1537
1538 nxge_mp->buffer = buffer;
1539 nxge_mp->block_size = size;
1540 nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
1541 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
1542 nxge_mp->ref_cnt = 1;
1543 nxge_mp->free = B_TRUE;
1544 nxge_mp->rx_use_bcopy = B_FALSE;
1545
1546 atomic_inc_32(&nxge_mblks_pending);
1547
1548 goto nxge_allocb_exit;
1549
1550 nxge_allocb_fail2:
1551 if (!nxge_mp->use_buf_pool) {
1552 KMEM_FREE(buffer, size);
1553 }
1554
1555 nxge_allocb_fail1:
1556 KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
1557 nxge_mp = NULL;
1558
1559 nxge_allocb_exit:
1560 return (nxge_mp);
1561 }
1562
1563 p_mblk_t
1564 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1565 {
1566 p_mblk_t mp;
1567
1568 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
1569 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
1570 "offset = 0x%08X "
1571 "size = 0x%08X",
1572 nxge_mp, offset, size));
1573
1574 mp = desballoc(&nxge_mp->buffer[offset], size,
1575 0, &nxge_mp->freeb);
1576 if (mp == NULL) {
1577 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
1578 goto nxge_dupb_exit;
1579 }
1580 atomic_inc_32(&nxge_mp->ref_cnt);
1581
1582
1583 nxge_dupb_exit:
1584 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
1585 nxge_mp));
1586 return (mp);
1587 }
1588
1589 p_mblk_t
1590 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
1591 {
1592 p_mblk_t mp;
1593 uchar_t *dp;
1594
1595 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
1596 if (mp == NULL) {
1597 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
1598 goto nxge_dupb_bcopy_exit;
1599 }
1600 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
1601 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
1602 mp->b_wptr = dp + size;
1603
1604 nxge_dupb_bcopy_exit:
1605 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
1606 nxge_mp));
1607 return (mp);
1608 }
1609
1610 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
1611 p_rx_msg_t rx_msg_p);
1612
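/*
 * nxge_post_page
 *
 *	Return a receive buffer to the hardware: clear its usage state,
 *	write its shifted DVMA address into the next RBR slot (under the
 *	post lock) and kick the RBR by one entry.
 */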
1613 void
1614 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
1615 {
1616 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
1617
1618 /* Reuse this buffer */
1619 rx_msg_p->free = B_FALSE;
1620 rx_msg_p->cur_usage_cnt = 0;
1621 rx_msg_p->max_usage_cnt = 0;
1622 rx_msg_p->pkt_buf_size = 0;
1623
1624 if (rx_rbr_p->rbr_use_bcopy) {
1625 rx_msg_p->rx_use_bcopy = B_FALSE;
1626 atomic_dec_32(&rx_rbr_p->rbr_consumed);
1627 }
1628
1629 /*
1630 * Get the rbr header pointer and its offset index.
1631 */
1632 MUTEX_ENTER(&rx_rbr_p->post_lock);
1633 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
1634 rx_rbr_p->rbr_wrap_mask);
1635 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
1636 MUTEX_EXIT(&rx_rbr_p->post_lock);
1637 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
1638 rx_rbr_p->rdc, 1);
1639
1640 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1641 "<== nxge_post_page (channel %d post_next_index %d)",
1642 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
1643
1644 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
1645 }
1646
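/*
 * nxge_freeb
 *
 *	desballoc() free routine for receive buffers.  Drops a reference on
 *	the rx_msg_t; on the last reference the buffer (and, if the ring has
 *	been unmapped and fully drained, the ring itself) is freed.
 *	Otherwise, if the buffer is marked free and the ring is posting,
 *	the page is reposted to the RBR.
 */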
1647 void
1648 nxge_freeb(p_rx_msg_t rx_msg_p)
1649 {
1650 size_t size;
1651 uchar_t *buffer = NULL;
1652 int ref_cnt;
1653 boolean_t free_state = B_FALSE;
1654
1655 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
1656
1657 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
1658 NXGE_DEBUG_MSG((NULL, MEM2_CTL,
1659 "nxge_freeb:rx_msg_p = $%p (block pending %d)",
1660 rx_msg_p, nxge_mblks_pending));
1661
1662 /*
1663 * First we need to get the free state, then
1664 * atomic decrement the reference count to prevent
1665 * the race condition with the interrupt thread that
1666 * is processing a loaned up buffer block.
1667 */
1668 free_state = rx_msg_p->free;
1669 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
1670 if (!ref_cnt) {
1671 atomic_dec_32(&nxge_mblks_pending);
1672 buffer = rx_msg_p->buffer;
1673 size = rx_msg_p->block_size;
1674 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
1675 "will free: rx_msg_p = $%p (block pending %d)",
1676 rx_msg_p, nxge_mblks_pending));
1677
1678 if (!rx_msg_p->use_buf_pool) {
1679 KMEM_FREE(buffer, size);
1680 }
1681
1682 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1683
1684 if (ring) {
1685 /*
1686 * Decrement the receive buffer ring's reference
1687 * count, too.
1688 */
1689 atomic_dec_32(&ring->rbr_ref_cnt);
1690
1691 /*
1692 * Free the receive buffer ring, if
1693 * 1. all the receive buffers have been freed
1694 * 2. and we are in the proper state (that is,
1695 * we are not UNMAPPING).
1696 */
1697 if (ring->rbr_ref_cnt == 0 &&
1698 ring->rbr_state == RBR_UNMAPPED) {
1699 /*
1700 * Free receive data buffers,
1701 * buffer index information
1702 * (rxring_info) and
1703 * the message block ring.
1704 */
1705 NXGE_DEBUG_MSG((NULL, RX_CTL,
1706 "nxge_freeb:rx_msg_p = $%p "
1707 "(block pending %d) free buffers",
1708 rx_msg_p, nxge_mblks_pending));
1709 nxge_rxdma_databuf_free(ring);
1710 if (ring->ring_info) {
1711 KMEM_FREE(ring->ring_info,
1712 sizeof (rxring_info_t));
1713 }
1714
1715 if (ring->rx_msg_ring) {
1716 KMEM_FREE(ring->rx_msg_ring,
1717 ring->tnblocks *
1718 sizeof (p_rx_msg_t));
1719 }
1720 KMEM_FREE(ring, sizeof (*ring));
1721 }
1722 }
1723 return;
1724 }
1725
1726 /*
1727 * Repost buffer.
1728 */
1729 if (free_state && (ref_cnt == 1) && ring) {
1730 NXGE_DEBUG_MSG((NULL, RX_CTL,
1731 "nxge_freeb: post page $%p:", rx_msg_p));
1732 if (ring->rbr_state == RBR_POSTING)
1733 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
1734 }
1735
1736 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
1737 }
1738
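/*
 * nxge_rx_intr
 *
 *	Receive interrupt service routine.  With the RCR lock held it reads
 *	the control/status register, processes packets via nxge_rx_pkts()
 *	(unless the ring is in polling mode), handles error events, writes
 *	back the W1C status bits and then rearms or disarms the logical
 *	device group depending on the poll_flag.  Any chain built here is
 *	handed to the MAC layer with mac_rx_ring().
 */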
1739 uint_t
1740 nxge_rx_intr(char *arg1, char *arg2)
1741 {
1742 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1743 p_nxge_t nxgep = (p_nxge_t)arg2;
1744 p_nxge_ldg_t ldgp;
1745 uint8_t channel;
1746 npi_handle_t handle;
1747 rx_dma_ctl_stat_t cs;
1748 p_rx_rcr_ring_t rcrp;
1749 mblk_t *mp = NULL;
1750
1751 if (ldvp == NULL) {
1752 NXGE_DEBUG_MSG((NULL, INT_CTL,
1753 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1754 nxgep, ldvp));
1755 return (DDI_INTR_CLAIMED);
1756 }
1757
1758 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1759 nxgep = ldvp->nxgep;
1760 }
1761
1762 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1763 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1764 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1765 		    "<== nxge_rx_intr: interface not started or initialized"));
1766 return (DDI_INTR_CLAIMED);
1767 }
1768
1769 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1770 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1771 nxgep, ldvp));
1772
1773 /*
1774 * Get the PIO handle.
1775 */
1776 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1777
1778 /*
1779 * Get the ring to enable us to process packets.
1780 */
1781 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1782
1783 /*
1784 * The RCR ring lock must be held when packets
1785 * are being processed and the hardware registers are
1786 * being read or written to prevent race condition
1787 * among the interrupt thread, the polling thread
1788 * (will cause fatal errors such as rcrincon bit set)
1789 * and the setting of the poll_flag.
1790 */
1791 MUTEX_ENTER(&rcrp->lock);
1792
1793 /*
1794 * Get the control and status for this channel.
1795 */
1796 channel = ldvp->channel;
1797 ldgp = ldvp->ldgp;
1798
1799 if (!isLDOMguest(nxgep) && (!rcrp->started)) {
1800 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1801 "<== nxge_rx_intr: channel is not started"));
1802
1803 /*
1804 * We received an interrupt before the ring is started.
1805 */
1806 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1807 &cs.value);
1808 cs.value &= RX_DMA_CTL_STAT_WR1C;
1809 cs.bits.hdw.mex = 1;
1810 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1811 cs.value);
1812
1813 /*
1814 * Rearm this logical group if this is a single device
1815 * group.
1816 */
1817 if (ldgp->nldvs == 1) {
1818 if (isLDOMguest(nxgep)) {
1819 nxge_hio_ldgimgn(nxgep, ldgp);
1820 } else {
1821 ldgimgm_t mgm;
1822
1823 mgm.value = 0;
1824 mgm.bits.ldw.arm = 1;
1825 mgm.bits.ldw.timer = ldgp->ldg_timer;
1826
1827 NXGE_REG_WR64(handle,
1828 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1829 mgm.value);
1830 }
1831 }
1832 MUTEX_EXIT(&rcrp->lock);
1833 return (DDI_INTR_CLAIMED);
1834 }
1835
1836 ASSERT(rcrp->ldgp == ldgp);
1837 ASSERT(rcrp->ldvp == ldvp);
1838
1839 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1840
1841 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1842 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1843 channel,
1844 cs.value,
1845 cs.bits.hdw.rcrto,
1846 cs.bits.hdw.rcrthres));
1847
1848 if (!rcrp->poll_flag) {
1849 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
1850 }
1851
1852 /* error events. */
1853 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1854 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1855 }
1856
1857 /*
1858 * Enable the mailbox update interrupt if we want
1859 * to use mailbox. We probably don't need to use
1860 * mailbox as it only saves us one pio read.
1861 * Also write 1 to rcrthres and rcrto to clear
1862 * these two edge triggered bits.
1863 */
1864 cs.value &= RX_DMA_CTL_STAT_WR1C;
1865 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
1866 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1867 cs.value);
1868
1869 /*
1870 * If the polling mode is enabled, disable the interrupt.
1871 */
1872 if (rcrp->poll_flag) {
1873 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1874 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1875 "(disabling interrupts)", channel, ldgp, ldvp));
1876
1877 /*
1878 * Disarm this logical group if this is a single device
1879 * group.
1880 */
1881 if (ldgp->nldvs == 1) {
1882 if (isLDOMguest(nxgep)) {
1883 ldgp->arm = B_FALSE;
1884 nxge_hio_ldgimgn(nxgep, ldgp);
1885 } else {
1886 ldgimgm_t mgm;
1887 mgm.value = 0;
1888 mgm.bits.ldw.arm = 0;
1889 NXGE_REG_WR64(handle,
1890 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1891 mgm.value);
1892 }
1893 }
1894 } else {
1895 /*
1896 * Rearm this logical group if this is a single device
1897 * group.
1898 */
1899 if (ldgp->nldvs == 1) {
1900 if (isLDOMguest(nxgep)) {
1901 nxge_hio_ldgimgn(nxgep, ldgp);
1902 } else {
1903 ldgimgm_t mgm;
1904
1905 mgm.value = 0;
1906 mgm.bits.ldw.arm = 1;
1907 mgm.bits.ldw.timer = ldgp->ldg_timer;
1908
1909 NXGE_REG_WR64(handle,
1910 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1911 mgm.value);
1912 }
1913 }
1914
1915 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1916 "==> nxge_rx_intr: rdc %d ldgp $%p "
1917 "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1918 }
1919 MUTEX_EXIT(&rcrp->lock);
1920
1921 if (mp != NULL) {
1922 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
1923 rcrp->rcr_gen_num);
1924 }
1925 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1926 return (DDI_INTR_CLAIMED);
1927 }
1928
1929 /*
1930 * This routine is the main packet receive processing function.
1931 * It gets the packet type, error code, and buffer related
1932 * information from the receive completion entry.
1933 * How many completion entries to process is based on the number of packets
1934 * queued by the hardware, a hardware maintained tail pointer
1935 * and a configurable receive packet count.
1936 *
1937  * A chain of message blocks will be created as a result of processing
1938  * the completion entries. This chain of message blocks will be returned and
1939  * a hardware control status register will be updated with the number of
1940  * packets that were removed from the hardware queue.
1941 *
1942 * The RCR ring lock is held when entering this function.
1943 */
1944 static mblk_t *
1945 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1946 int bytes_to_pickup)
1947 {
1948 npi_handle_t handle;
1949 uint8_t channel;
1950 uint32_t comp_rd_index;
1951 p_rcr_entry_t rcr_desc_rd_head_p;
1952 p_rcr_entry_t rcr_desc_rd_head_pp;
1953 p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
1954 uint16_t qlen, nrcr_read, npkt_read;
1955 uint32_t qlen_hw;
1956 boolean_t multi;
1957 rcrcfig_b_t rcr_cfg_b;
1958 int totallen = 0;
1959 #if defined(_BIG_ENDIAN)
1960 npi_status_t rs = NPI_SUCCESS;
1961 #endif
1962
1963 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
1964 "channel %d", rcr_p->rdc));
1965
1966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1967 return (NULL);
1968 }
1969 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1970 channel = rcr_p->rdc;
1971
1972 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1973 "==> nxge_rx_pkts: START: rcr channel %d "
1974 "head_p $%p head_pp $%p index %d ",
1975 channel, rcr_p->rcr_desc_rd_head_p,
1976 rcr_p->rcr_desc_rd_head_pp,
1977 rcr_p->comp_rd_index));
1978
1979
1980 #if !defined(_BIG_ENDIAN)
1981 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
1982 #else
1983 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1984 if (rs != NPI_SUCCESS) {
1985 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
1986 "channel %d, get qlen failed 0x%08x",
1987 channel, rs));
1988 return (NULL);
1989 }
1990 #endif
1991 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
1992 "qlen %d", channel, qlen));
1993
1994
1995
1996 if (!qlen) {
1997 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1998 "==> nxge_rx_pkts:rcr channel %d "
1999 "qlen %d (no pkts)", channel, qlen));
2000
2001 return (NULL);
2002 }
2003
2004 comp_rd_index = rcr_p->comp_rd_index;
2005
2006 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
2007 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
2008 nrcr_read = npkt_read = 0;
2009
2010 	/*
2011 	 * Number of packets queued.
2012 	 * (A jumbo or multi-buffer packet is counted as only one
2013 	 * packet, though it may take up more than one completion entry.)
2014 	 */
2015 qlen_hw = (qlen < nxge_max_rx_pkts) ?
2016 qlen : nxge_max_rx_pkts;
2017 head_mp = NULL;
2018 tail_mp = &head_mp;
2019 nmp = mp_cont = NULL;
2020 multi = B_FALSE;
2021
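	/*
	 * Each pass of the loop below consumes one completion entry.
	 * Frames that fit in a single buffer are linked to the chain with
	 * b_next; multi-buffer frames are assembled with b_cont (see the
	 * "message chaining modes" cases).
	 */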
2022 while (qlen_hw) {
2023
2024 #ifdef NXGE_DEBUG
2025 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
2026 #endif
2027 /*
2028 * Process one completion ring entry.
2029 */
2030 nxge_receive_packet(nxgep,
2031 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);
2032
2033 /*
2034 * message chaining modes
2035 */
2036 if (nmp) {
2037 nmp->b_next = NULL;
2038 if (!multi && !mp_cont) { /* frame fits a partition */
2039 *tail_mp = nmp;
2040 tail_mp = &nmp->b_next;
2041 totallen += MBLKL(nmp);
2042 nmp = NULL;
2043 } else if (multi && !mp_cont) { /* first segment */
2044 *tail_mp = nmp;
2045 tail_mp = &nmp->b_cont;
2046 totallen += MBLKL(nmp);
2047 } else if (multi && mp_cont) { /* mid of multi segs */
2048 *tail_mp = mp_cont;
2049 tail_mp = &mp_cont->b_cont;
2050 totallen += MBLKL(mp_cont);
2051 } else if (!multi && mp_cont) { /* last segment */
2052 *tail_mp = mp_cont;
2053 tail_mp = &nmp->b_next;
2054 totallen += MBLKL(mp_cont);
2055 nmp = NULL;
2056 }
2057 }
2058 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2059 "==> nxge_rx_pkts: loop: rcr channel %d "
2060 "before updating: multi %d "
2061 "nrcr_read %d "
2062 "npk read %d "
2063 "head_pp $%p index %d ",
2064 channel,
2065 multi,
2066 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2067 comp_rd_index));
2068
2069 if (!multi) {
2070 qlen_hw--;
2071 npkt_read++;
2072 }
2073
2074 /*
2075 * Update the next read entry.
2076 */
2077 comp_rd_index = NEXT_ENTRY(comp_rd_index,
2078 rcr_p->comp_wrap_mask);
2079
2080 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
2081 rcr_p->rcr_desc_first_p,
2082 rcr_p->rcr_desc_last_p);
2083
2084 nrcr_read++;
2085
2086 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2087 "<== nxge_rx_pkts: (SAM, process one packet) "
2088 "nrcr_read %d",
2089 nrcr_read));
2090 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2091 "==> nxge_rx_pkts: loop: rcr channel %d "
2092 "multi %d "
2093 "nrcr_read %d "
2094 "npk read %d "
2095 "head_pp $%p index %d ",
2096 channel,
2097 multi,
2098 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2099 comp_rd_index));
2100
2101 if ((bytes_to_pickup != -1) &&
2102 (totallen >= bytes_to_pickup)) {
2103 break;
2104 }
2105 }
2106
2107 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2108 rcr_p->comp_rd_index = comp_rd_index;
2109 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
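	/*
	 * If the interrupt tunables have changed, reprogram this channel's
	 * RCR configuration B register with the new packet threshold and
	 * timeout, clamping each to its supported minimum.
	 */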
2110 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2111 (nxgep->intr_threshold != rcr_p->intr_threshold)) {
2112
2113 rcr_p->intr_timeout = (nxgep->intr_timeout <
2114 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
2115 nxgep->intr_timeout;
2116
2117 rcr_p->intr_threshold = (nxgep->intr_threshold <
2118 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
2119 nxgep->intr_threshold;
2120
2121 rcr_cfg_b.value = 0x0ULL;
2122 rcr_cfg_b.bits.ldw.entout = 1;
2123 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
2124 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
2125
2126 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
2127 channel, rcr_cfg_b.value);
2128 }
2129
2130 cs.bits.ldw.pktread = npkt_read;
2131 cs.bits.ldw.ptrread = nrcr_read;
2132 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2133 channel, cs.value);
2134 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2135 "==> nxge_rx_pkts: EXIT: rcr channel %d "
2136 "head_pp $%p index %016llx ",
2137 channel,
2138 rcr_p->rcr_desc_rd_head_pp,
2139 rcr_p->comp_rd_index));
2140 /*
2141 * Update RCR buffer pointer read and number of packets
2142 * read.
2143 */
2144
2145 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
2146 "channel %d", rcr_p->rdc));
2147
2148 return (head_mp);
2149 }
2150
2151 void
2152 nxge_receive_packet(p_nxge_t nxgep,
2153 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2154 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2155 {
2156 p_mblk_t nmp = NULL;
2157 uint64_t multi;
2158 uint64_t dcf_err;
2159 uint8_t channel;
2160
2161 boolean_t first_entry = B_TRUE;
2162 boolean_t is_tcp_udp = B_FALSE;
2163 boolean_t buffer_free = B_FALSE;
2164 boolean_t error_send_up = B_FALSE;
2165 uint8_t error_type;
2166 uint16_t l2_len;
2167 uint16_t skip_len;
2168 uint8_t pktbufsz_type;
2169 uint64_t rcr_entry;
2170 uint64_t *pkt_buf_addr_pp;
2171 uint64_t *pkt_buf_addr_p;
2172 uint32_t buf_offset;
2173 uint32_t bsize;
2174 uint32_t error_disp_cnt;
2175 uint32_t msg_index;
2176 p_rx_rbr_ring_t rx_rbr_p;
2177 p_rx_msg_t *rx_msg_ring_p;
2178 p_rx_msg_t rx_msg_p;
2179 uint16_t sw_offset_bytes = 0, hdr_size = 0;
2180 nxge_status_t status = NXGE_OK;
2181 boolean_t is_valid = B_FALSE;
2182 p_nxge_rx_ring_stats_t rdc_stats;
2183 uint32_t bytes_read;
2184 uint64_t pkt_type;
2185 uint64_t frag;
2186 boolean_t pkt_too_long_err = B_FALSE;
2187 #ifdef NXGE_DEBUG
2188 int dump_len;
2189 #endif
2190 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
2191 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
2192
2193 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
2194
2195 multi = (rcr_entry & RCR_MULTI_MASK);
2196 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
2197 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
2198
2199 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
2200 frag = (rcr_entry & RCR_FRAG_MASK);
2201
2202 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
2203
2204 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
2205 RCR_PKTBUFSZ_SHIFT);
2206 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
2207 RCR_PKT_BUF_ADDR_SHIFT);
2208
2209 channel = rcr_p->rdc;
2210
2211 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2212 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2213 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2214 "error_type 0x%x pkt_type 0x%x "
2215 "pktbufsz_type %d ",
2216 rcr_desc_rd_head_p,
2217 rcr_entry, pkt_buf_addr_pp, l2_len,
2218 multi,
2219 error_type,
2220 pkt_type,
2221 pktbufsz_type));
2222
2223 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2224 "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
2225 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
2226 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
2227 rcr_entry, pkt_buf_addr_pp, l2_len,
2228 multi,
2229 error_type,
2230 pkt_type));
2231
2232 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2233 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2234 "full pkt_buf_addr_pp $%p l2_len %d",
2235 rcr_entry, pkt_buf_addr_pp, l2_len));
2236
2237 /* get the stats ptr */
2238 rdc_stats = rcr_p->rdc_stats;
2239
2240 if (!l2_len) {
2241
2242 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2243 "<== nxge_receive_packet: failed: l2 length is 0."));
2244 return;
2245 }
2246
2247 	/*
2248 	 * Software workaround for a BMAC hardware limitation that allows
2249 	 * a max frame size of 1526 (instead of 1522) for non-jumbo and
2250 	 * 0x2406 (instead of 0x2400) for jumbo.
2251 	 */
2252 if (l2_len > nxgep->mac.maxframesize) {
2253 pkt_too_long_err = B_TRUE;
2254 }
2255
2256 /* Hardware sends us 4 bytes of CRC as no stripping is done. */
2257 l2_len -= ETHERFCSL;
2258
2259 /* shift 6 bits to get the full io address */
2260 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
2261 RCR_PKT_BUF_ADDR_SHIFT_FULL);
2262 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2263 "==> (rbr) nxge_receive_packet: entry 0x%0llx "
2264 "full pkt_buf_addr_pp $%p l2_len %d",
2265 rcr_entry, pkt_buf_addr_pp, l2_len));
2266
2267 rx_rbr_p = rcr_p->rx_rbr_p;
2268 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
2269
2270 if (first_entry) {
2271 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
2272 RXDMA_HDR_SIZE_DEFAULT);
2273
2274 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2275 "==> nxge_receive_packet: first entry 0x%016llx "
2276 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2277 rcr_entry, pkt_buf_addr_pp, l2_len,
2278 hdr_size));
2279 }
2280
2281 MUTEX_ENTER(&rx_rbr_p->lock);
2282
2283 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2284 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2285 "full pkt_buf_addr_pp $%p l2_len %d",
2286 rcr_entry, pkt_buf_addr_pp, l2_len));
2287
2288 /*
2289 * Packet buffer address in the completion entry points
2290 * to the starting buffer address (offset 0).
2291 * Use the starting buffer address to locate the corresponding
2292 * kernel address.
2293 */
2294 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
2295 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
2296 &buf_offset,
2297 &msg_index);
2298
2299 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2300 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2301 "full pkt_buf_addr_pp $%p l2_len %d",
2302 rcr_entry, pkt_buf_addr_pp, l2_len));
2303
2304 if (status != NXGE_OK) {
2305 MUTEX_EXIT(&rx_rbr_p->lock);
2306 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2307 "<== nxge_receive_packet: found vaddr failed %d",
2308 status));
2309 return;
2310 }
2311
2312 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2313 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
2314 "full pkt_buf_addr_pp $%p l2_len %d",
2315 rcr_entry, pkt_buf_addr_pp, l2_len));
2316
2317 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2318 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2319 "full pkt_buf_addr_pp $%p l2_len %d",
2320 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2321
2322 rx_msg_p = rx_msg_ring_p[msg_index];
2323
2324 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2325 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
2326 "full pkt_buf_addr_pp $%p l2_len %d",
2327 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
2328
2329 switch (pktbufsz_type) {
2330 case RCR_PKTBUFSZ_0:
2331 bsize = rx_rbr_p->pkt_buf_size0_bytes;
2332 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2333 "==> nxge_receive_packet: 0 buf %d", bsize));
2334 break;
2335 case RCR_PKTBUFSZ_1:
2336 bsize = rx_rbr_p->pkt_buf_size1_bytes;
2337 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2338 "==> nxge_receive_packet: 1 buf %d", bsize));
2339 break;
2340 case RCR_PKTBUFSZ_2:
2341 bsize = rx_rbr_p->pkt_buf_size2_bytes;
2342 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2343 "==> nxge_receive_packet: 2 buf %d", bsize));
2344 break;
2345 case RCR_SINGLE_BLOCK:
2346 bsize = rx_msg_p->block_size;
2347 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2348 "==> nxge_receive_packet: single %d", bsize));
2349
2350 break;
2351 default:
2352 MUTEX_EXIT(&rx_rbr_p->lock);
2353 return;
2354 }
2355
2356 switch (nxge_rdc_buf_offset) {
2357 case SW_OFFSET_NO_OFFSET:
2358 sw_offset_bytes = 0;
2359 break;
2360 case SW_OFFSET_64:
2361 sw_offset_bytes = 64;
2362 break;
2363 case SW_OFFSET_128:
2364 sw_offset_bytes = 128;
2365 break;
2366 case SW_OFFSET_192:
2367 sw_offset_bytes = 192;
2368 break;
2369 case SW_OFFSET_256:
2370 sw_offset_bytes = 256;
2371 break;
2372 case SW_OFFSET_320:
2373 sw_offset_bytes = 320;
2374 break;
2375 case SW_OFFSET_384:
2376 sw_offset_bytes = 384;
2377 break;
2378 case SW_OFFSET_448:
2379 sw_offset_bytes = 448;
2380 break;
2381 default:
2382 sw_offset_bytes = 0;
2383 break;
2384 }
2385
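	/*
	 * sw_offset_bytes is the software packet offset the RDC was
	 * configured (via the nxge_rdc_buf_offset tunable) to insert ahead
	 * of the received data; the DMA sync below must cover this offset
	 * plus the header and payload.
	 */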
2386 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2387 (buf_offset + sw_offset_bytes),
2388 (hdr_size + l2_len),
2389 DDI_DMA_SYNC_FORCPU);
2390
2391 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2392 "==> nxge_receive_packet: after first dump:usage count"));
2393
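	/*
	 * A receive buffer block may be shared by several packets.
	 * cur_usage_cnt tracks how many packet buffers of this block the
	 * hardware has handed up; once max_usage_cnt is reached the whole
	 * block may be reposted to the RBR or freed (buffer_free).
	 */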
2394 if (rx_msg_p->cur_usage_cnt == 0) {
2395 if (rx_rbr_p->rbr_use_bcopy) {
2396 atomic_inc_32(&rx_rbr_p->rbr_consumed);
2397 if (rx_rbr_p->rbr_consumed <
2398 rx_rbr_p->rbr_threshold_hi) {
2399 if (rx_rbr_p->rbr_threshold_lo == 0 ||
2400 ((rx_rbr_p->rbr_consumed >=
2401 rx_rbr_p->rbr_threshold_lo) &&
2402 (rx_rbr_p->rbr_bufsize_type >=
2403 pktbufsz_type))) {
2404 rx_msg_p->rx_use_bcopy = B_TRUE;
2405 }
2406 } else {
2407 rx_msg_p->rx_use_bcopy = B_TRUE;
2408 }
2409 }
2410 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2411 "==> nxge_receive_packet: buf %d (new block) ",
2412 bsize));
2413
2414 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
2415 rx_msg_p->pkt_buf_size = bsize;
2416 rx_msg_p->cur_usage_cnt = 1;
2417 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
2418 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2419 "==> nxge_receive_packet: buf %d "
2420 "(single block) ",
2421 bsize));
2422 /*
2423 * Buffer can be reused once the free function
2424 * is called.
2425 */
2426 rx_msg_p->max_usage_cnt = 1;
2427 buffer_free = B_TRUE;
2428 } else {
2429 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
2430 if (rx_msg_p->max_usage_cnt == 1) {
2431 buffer_free = B_TRUE;
2432 }
2433 }
2434 } else {
2435 rx_msg_p->cur_usage_cnt++;
2436 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
2437 buffer_free = B_TRUE;
2438 }
2439 }
2440
2441 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2442 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
2443 msg_index, l2_len,
2444 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
2445
2446 if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
2447 rdc_stats->ierrors++;
2448 if (dcf_err) {
2449 rdc_stats->dcf_err++;
2450 #ifdef NXGE_DEBUG
2451 			if (rdc_stats->dcf_err == 1) {
2452 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2453 "nxge_receive_packet: channel %d dcf_err rcr"
2454 " 0x%llx", channel, rcr_entry));
2455 }
2456 #endif
2457 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0,
2458 NXGE_FM_EREPORT_RDMC_DCF_ERR);
2459 } else if (pkt_too_long_err) {
2460 rdc_stats->pkt_too_long_err++;
2461 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
2462 " channel %d packet length [%d] > "
2463 "maxframesize [%d]", channel, l2_len + ETHERFCSL,
2464 nxgep->mac.maxframesize));
2465 } else {
2466 /* Update error stats */
2467 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2468 rdc_stats->errlog.compl_err_type = error_type;
2469
2470 switch (error_type) {
2471 /*
2472 * Do not send FMA ereport for RCR_L2_ERROR and
2473 * RCR_L4_CSUM_ERROR because most likely they indicate
2474 * back pressure rather than HW failures.
2475 */
2476 case RCR_L2_ERROR:
2477 rdc_stats->l2_err++;
2478 if (rdc_stats->l2_err <
2479 error_disp_cnt) {
2480 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2481 " nxge_receive_packet:"
2482 " channel %d RCR L2_ERROR",
2483 channel));
2484 }
2485 break;
2486 case RCR_L4_CSUM_ERROR:
2487 error_send_up = B_TRUE;
2488 rdc_stats->l4_cksum_err++;
2489 if (rdc_stats->l4_cksum_err <
2490 error_disp_cnt) {
2491 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2492 " nxge_receive_packet:"
2493 " channel %d"
2494 " RCR L4_CSUM_ERROR", channel));
2495 }
2496 break;
2497 /*
2498 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2499 * RCR_ZCP_SOFT_ERROR because they reflect the same
2500 * FFLP and ZCP errors that have been reported by
2501 * nxge_fflp.c and nxge_zcp.c.
2502 */
2503 case RCR_FFLP_SOFT_ERROR:
2504 error_send_up = B_TRUE;
2505 rdc_stats->fflp_soft_err++;
2506 if (rdc_stats->fflp_soft_err <
2507 error_disp_cnt) {
2508 NXGE_ERROR_MSG((nxgep,
2509 NXGE_ERR_CTL,
2510 " nxge_receive_packet:"
2511 " channel %d"
2512 " RCR FFLP_SOFT_ERROR", channel));
2513 }
2514 break;
2515 case RCR_ZCP_SOFT_ERROR:
2516 error_send_up = B_TRUE;
2517 				rdc_stats->zcp_soft_err++;
2518 if (rdc_stats->zcp_soft_err <
2519 error_disp_cnt)
2520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2521 " nxge_receive_packet: Channel %d"
2522 " RCR ZCP_SOFT_ERROR", channel));
2523 break;
2524 default:
2525 rdc_stats->rcr_unknown_err++;
2526 if (rdc_stats->rcr_unknown_err
2527 < error_disp_cnt) {
2528 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2529 " nxge_receive_packet: Channel %d"
2530 " RCR entry 0x%llx error 0x%x",
2531 				    channel, rcr_entry, error_type));
2532 }
2533 break;
2534 }
2535 }
2536
2537 /*
2538 * Update and repost buffer block if max usage
2539 * count is reached.
2540 */
2541 if (error_send_up == B_FALSE) {
2542 atomic_inc_32(&rx_msg_p->ref_cnt);
2543 if (buffer_free == B_TRUE) {
2544 rx_msg_p->free = B_TRUE;
2545 }
2546
2547 MUTEX_EXIT(&rx_rbr_p->lock);
2548 nxge_freeb(rx_msg_p);
2549 return;
2550 }
2551 }
2552
2553 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2554 "==> nxge_receive_packet: DMA sync second "));
2555
2556 bytes_read = rcr_p->rcvd_pkt_bytes;
2557 skip_len = sw_offset_bytes + hdr_size;
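	/*
	 * skip_len is the software offset plus the receive packet header
	 * preceding the frame data; it is skipped only for the first
	 * buffer of a frame. Buffers are either loaned up (nxge_dupb) or
	 * copied (nxge_dupb_bcopy) depending on rx_use_bcopy.
	 */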
2558 if (!rx_msg_p->rx_use_bcopy) {
2559 		/*
2560 		 * For loaned-up buffers, the driver reference count
2561 		 * is incremented first, and then the free state is set.
2562 		 */
2563 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2564 if (first_entry) {
2565 nmp->b_rptr = &nmp->b_rptr[skip_len];
2566 if (l2_len < bsize - skip_len) {
2567 nmp->b_wptr = &nmp->b_rptr[l2_len];
2568 } else {
2569 nmp->b_wptr = &nmp->b_rptr[bsize
2570 - skip_len];
2571 }
2572 } else {
2573 if (l2_len - bytes_read < bsize) {
2574 nmp->b_wptr =
2575 &nmp->b_rptr[l2_len - bytes_read];
2576 } else {
2577 nmp->b_wptr = &nmp->b_rptr[bsize];
2578 }
2579 }
2580 }
2581 } else {
2582 if (first_entry) {
2583 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2584 l2_len < bsize - skip_len ?
2585 l2_len : bsize - skip_len);
2586 } else {
2587 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2588 l2_len - bytes_read < bsize ?
2589 l2_len - bytes_read : bsize);
2590 }
2591 }
2592 if (nmp != NULL) {
2593 if (first_entry) {
2594 /*
2595 * Jumbo packets may be received with more than one
2596 * buffer, increment ipackets for the first entry only.
2597 */
2598 rdc_stats->ipackets++;
2599
2600 /* Update ibytes for kstat. */
2601 rdc_stats->ibytes += skip_len
2602 + l2_len < bsize ? l2_len : bsize;
2603 /*
2604 * Update the number of bytes read so far for the
2605 * current frame.
2606 */
2607 bytes_read = nmp->b_wptr - nmp->b_rptr;
2608 } else {
2609 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2610 l2_len - bytes_read : bsize;
2611 bytes_read += nmp->b_wptr - nmp->b_rptr;
2612 }
2613
2614 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2615 "==> nxge_receive_packet after dupb: "
2616 "rbr consumed %d "
2617 "pktbufsz_type %d "
2618 "nmp $%p rptr $%p wptr $%p "
2619 "buf_offset %d bzise %d l2_len %d skip_len %d",
2620 rx_rbr_p->rbr_consumed,
2621 pktbufsz_type,
2622 nmp, nmp->b_rptr, nmp->b_wptr,
2623 buf_offset, bsize, l2_len, skip_len));
2624 } else {
2625 cmn_err(CE_WARN, "!nxge_receive_packet: "
2626 "update stats (error)");
2627 atomic_inc_32(&rx_msg_p->ref_cnt);
2628 if (buffer_free == B_TRUE) {
2629 rx_msg_p->free = B_TRUE;
2630 }
2631 MUTEX_EXIT(&rx_rbr_p->lock);
2632 nxge_freeb(rx_msg_p);
2633 return;
2634 }
2635
2636 if (buffer_free == B_TRUE) {
2637 rx_msg_p->free = B_TRUE;
2638 }
2639
2640 is_valid = (nmp != NULL);
2641
2642 rcr_p->rcvd_pkt_bytes = bytes_read;
2643
2644 MUTEX_EXIT(&rx_rbr_p->lock);
2645
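	/*
	 * If the data was bcopy'd and this block's usage is complete, the
	 * receive buffer can be released right away: take a reference and
	 * let nxge_freeb() repost or free it.
	 */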
2646 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2647 atomic_inc_32(&rx_msg_p->ref_cnt);
2648 nxge_freeb(rx_msg_p);
2649 }
2650
2651 if (is_valid) {
2652 nmp->b_cont = NULL;
2653 if (first_entry) {
2654 *mp = nmp;
2655 *mp_cont = NULL;
2656 } else {
2657 *mp_cont = nmp;
2658 }
2659 }
2660
2661 /*
2662 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2663 * If a packet is not fragmented and no error bit is set, then
2664 * L4 checksum is OK.
2665 */
2666
2667 if (is_valid && !multi) {
2668 		/*
2669 		 * If the checksum flag nxge_cksum_offload
2670 		 * is 1, both TCP and UDP packets can be sent
2671 		 * up with a good checksum. If the checksum flag
2672 		 * is set to 0, checksum reporting applies to
2673 		 * TCP packets only (a workaround for a hardware bug).
2674 		 * If the checksum flag nxge_cksum_offload is
2675 		 * greater than 1, neither TCP nor UDP packets
2676 		 * have their hardware checksum results reported.
2677 		 */
2678 if (nxge_cksum_offload == 1) {
2679 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2680 pkt_type == RCR_PKT_IS_UDP) ?
2681 B_TRUE: B_FALSE);
2682 } else if (!nxge_cksum_offload) {
2683 /* TCP checksum only. */
2684 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2685 B_TRUE: B_FALSE);
2686 }
2687
2688 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
2689 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
2690 is_valid, multi, is_tcp_udp, frag, error_type));
2691
2692 if (is_tcp_udp && !frag && !error_type) {
2693 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2694 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2695 "==> nxge_receive_packet: Full tcp/udp cksum "
2696 "is_valid 0x%x multi 0x%llx pkt %d frag %d "
2697 "error %d",
2698 is_valid, multi, is_tcp_udp, frag, error_type));
2699 }
2700 }
2701
2702 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2703 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2704
2705 *multi_p = (multi == RCR_MULTI_MASK);
2706 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2707 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2708 *multi_p, nmp, *mp, *mp_cont));
2709 }
2710
2711 /*
2712  * Enable polling for a ring. The interrupt for the ring is disabled when
2713  * the nxge interrupt arrives (see nxge_rx_intr).
2714  */
2715 int
2716 nxge_enable_poll(void *arg)
2717 {
2718 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2719 p_rx_rcr_ring_t ringp;
2720 p_nxge_t nxgep;
2721 p_nxge_ldg_t ldgp;
2722 uint32_t channel;
2723
2724 if (ring_handle == NULL) {
2725 ASSERT(ring_handle != NULL);
2726 return (0);
2727 }
2728
2729 nxgep = ring_handle->nxgep;
2730 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2731 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2732 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2733 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2734 ldgp = ringp->ldgp;
2735 if (ldgp == NULL) {
2736 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2737 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2738 ringp->rdc));
2739 return (0);
2740 }
2741
2742 MUTEX_ENTER(&ringp->lock);
2743 /* enable polling */
2744 if (ringp->poll_flag == 0) {
2745 ringp->poll_flag = 1;
2746 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2747 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2748 ringp->rdc));
2749 }
2750
2751 MUTEX_EXIT(&ringp->lock);
2752 return (0);
2753 }
2754 /*
2755 * Disable polling for a ring and enable its interrupt.
2756 */
2757 int
2758 nxge_disable_poll(void *arg)
2759 {
2760 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2761 p_rx_rcr_ring_t ringp;
2762 p_nxge_t nxgep;
2763 uint32_t channel;
2764
2765 if (ring_handle == NULL) {
2766 ASSERT(ring_handle != NULL);
2767 return (0);
2768 }
2769
2770 nxgep = ring_handle->nxgep;
2771 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2772 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2773
2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2775 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));
2776
2777 MUTEX_ENTER(&ringp->lock);
2778
2779 /* disable polling: enable interrupt */
2780 if (ringp->poll_flag) {
2781 npi_handle_t handle;
2782 rx_dma_ctl_stat_t cs;
2783 uint8_t channel;
2784 p_nxge_ldg_t ldgp;
2785
2786 /*
2787 * Get the control and status for this channel.
2788 */
2789 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2790 channel = ringp->rdc;
2791 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2792 channel, &cs.value);
2793
2794 		/*
2795 		 * Enable mailbox update.
2796 		 * Since packets were not read and the hardware uses
2797 		 * the pktread and ptrread bits to update the queue
2798 		 * length, we need to set both bits to 0.
2799 		 */
2800 cs.bits.ldw.pktread = 0;
2801 cs.bits.ldw.ptrread = 0;
2802 cs.bits.hdw.mex = 1;
2803 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2804 cs.value);
2805
2806 /*
2807 * Rearm this logical group if this is a single device
2808 * group.
2809 */
2810 ldgp = ringp->ldgp;
2811 if (ldgp == NULL) {
2812 ringp->poll_flag = 0;
2813 MUTEX_EXIT(&ringp->lock);
2814 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2815 "==> nxge_disable_poll: no ldgp rdc %d "
2816 "(still set poll to 0", ringp->rdc));
2817 return (0);
2818 }
2819 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2820 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2821 ringp->rdc, ldgp));
2822 if (ldgp->nldvs == 1) {
2823 if (isLDOMguest(nxgep)) {
2824 ldgp->arm = B_TRUE;
2825 nxge_hio_ldgimgn(nxgep, ldgp);
2826 } else {
2827 ldgimgm_t mgm;
2828 mgm.value = 0;
2829 mgm.bits.ldw.arm = 1;
2830 mgm.bits.ldw.timer = ldgp->ldg_timer;
2831 NXGE_REG_WR64(handle,
2832 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
2833 mgm.value);
2834 }
2835 }
2836 ringp->poll_flag = 0;
2837 }
2838
2839 MUTEX_EXIT(&ringp->lock);
2840 return (0);
2841 }
2842
2843 /*
2844 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
2845 */
2846 mblk_t *
2847 nxge_rx_poll(void *arg, int bytes_to_pickup)
2848 {
2849 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2850 p_rx_rcr_ring_t rcr_p;
2851 p_nxge_t nxgep;
2852 npi_handle_t handle;
2853 rx_dma_ctl_stat_t cs;
2854 mblk_t *mblk;
2855 p_nxge_ldv_t ldvp;
2856 uint32_t channel;
2857
2858 nxgep = ring_handle->nxgep;
2859
2860 /*
2861 * Get the control and status for this channel.
2862 */
2863 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2864 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2865 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
2866 MUTEX_ENTER(&rcr_p->lock);
2867 ASSERT(rcr_p->poll_flag == 1);
2868
2869 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
2870
2871 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2872 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
2873 rcr_p->rdc, rcr_p->poll_flag));
2874 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
2875
2876 ldvp = rcr_p->ldvp;
2877 /* error events. */
2878 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
2879 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
2880 }
2881
2882 MUTEX_EXIT(&rcr_p->lock);
2883
2884 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2885 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
2886 return (mblk);
2887 }
2888
2889
2890 /*ARGSUSED*/
2891 static nxge_status_t
2892 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2893 {
2894 p_nxge_rx_ring_stats_t rdc_stats;
2895 npi_handle_t handle;
2896 npi_status_t rs;
2897 boolean_t rxchan_fatal = B_FALSE;
2898 boolean_t rxport_fatal = B_FALSE;
2899 uint8_t portn;
2900 nxge_status_t status = NXGE_OK;
2901 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX;
2902 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
2903
2904 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2905 portn = nxgep->mac.portnum;
2906 rdc_stats = &nxgep->statsp->rdc_stats[channel];
2907
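	/*
	 * Check each status bit below independently: bump the matching
	 * counter, post an FMA ereport for hardware faults, and flag
	 * rxchan_fatal or rxport_fatal so recovery is attempted at the end.
	 */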
2908 if (cs.bits.hdw.rbr_tmout) {
2909 rdc_stats->rx_rbr_tmout++;
2910 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2911 NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
2912 rxchan_fatal = B_TRUE;
2913 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2914 "==> nxge_rx_err_evnts: rx_rbr_timeout"));
2915 }
2916 if (cs.bits.hdw.rsp_cnt_err) {
2917 rdc_stats->rsp_cnt_err++;
2918 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2919 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
2920 rxchan_fatal = B_TRUE;
2921 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2922 "==> nxge_rx_err_evnts(channel %d): "
2923 "rsp_cnt_err", channel));
2924 }
2925 if (cs.bits.hdw.byte_en_bus) {
2926 rdc_stats->byte_en_bus++;
2927 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2928 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
2929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2930 "==> nxge_rx_err_evnts(channel %d): "
2931 "fatal error: byte_en_bus", channel));
2932 rxchan_fatal = B_TRUE;
2933 }
2934 if (cs.bits.hdw.rsp_dat_err) {
2935 rdc_stats->rsp_dat_err++;
2936 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2937 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
2938 rxchan_fatal = B_TRUE;
2939 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2940 "==> nxge_rx_err_evnts(channel %d): "
2941 "fatal error: rsp_dat_err", channel));
2942 }
2943 if (cs.bits.hdw.rcr_ack_err) {
2944 rdc_stats->rcr_ack_err++;
2945 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2946 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
2947 rxchan_fatal = B_TRUE;
2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2949 "==> nxge_rx_err_evnts(channel %d): "
2950 "fatal error: rcr_ack_err", channel));
2951 }
2952 if (cs.bits.hdw.dc_fifo_err) {
2953 rdc_stats->dc_fifo_err++;
2954 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2955 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
2956 		/* Not fatal for the channel, but it is a port-fatal error. */
2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2958 "==> nxge_rx_err_evnts(channel %d): "
2959 "dc_fifo_err", channel));
2960 rxport_fatal = B_TRUE;
2961 }
2962 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
2963 if ((rs = npi_rxdma_ring_perr_stat_get(handle,
2964 &rdc_stats->errlog.pre_par,
2965 &rdc_stats->errlog.sha_par))
2966 != NPI_SUCCESS) {
2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2968 "==> nxge_rx_err_evnts(channel %d): "
2969 "rcr_sha_par: get perr", channel));
2970 return (NXGE_ERROR | rs);
2971 }
2972 if (cs.bits.hdw.rcr_sha_par) {
2973 rdc_stats->rcr_sha_par++;
2974 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2975 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2976 rxchan_fatal = B_TRUE;
2977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2978 "==> nxge_rx_err_evnts(channel %d): "
2979 "fatal error: rcr_sha_par", channel));
2980 }
2981 if (cs.bits.hdw.rbr_pre_par) {
2982 rdc_stats->rbr_pre_par++;
2983 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
2984 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2985 rxchan_fatal = B_TRUE;
2986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2987 "==> nxge_rx_err_evnts(channel %d): "
2988 "fatal error: rbr_pre_par", channel));
2989 }
2990 }
2991 	/*
2992 	 * The following 4 status bits are informational; the system
2993 	 * is running fine. There is no need to send FMA ereports or
2994 	 * log messages.
2995 	 */
2996 if (cs.bits.hdw.port_drop_pkt) {
2997 rdc_stats->port_drop_pkt++;
2998 }
2999 if (cs.bits.hdw.wred_drop) {
3000 rdc_stats->wred_drop++;
3001 }
3002 if (cs.bits.hdw.rbr_pre_empty) {
3003 rdc_stats->rbr_pre_empty++;
3004 }
3005 if (cs.bits.hdw.rcr_shadow_full) {
3006 rdc_stats->rcr_shadow_full++;
3007 }
3008 if (cs.bits.hdw.config_err) {
3009 rdc_stats->config_err++;
3010 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3011 NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
3012 rxchan_fatal = B_TRUE;
3013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3014 "==> nxge_rx_err_evnts(channel %d): "
3015 "config error", channel));
3016 }
3017 if (cs.bits.hdw.rcrincon) {
3018 rdc_stats->rcrincon++;
3019 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3020 NXGE_FM_EREPORT_RDMC_RCRINCON);
3021 rxchan_fatal = B_TRUE;
3022 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3023 "==> nxge_rx_err_evnts(channel %d): "
3024 "fatal error: rcrincon error", channel));
3025 }
3026 if (cs.bits.hdw.rcrfull) {
3027 rdc_stats->rcrfull++;
3028 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3029 NXGE_FM_EREPORT_RDMC_RCRFULL);
3030 rxchan_fatal = B_TRUE;
3031 if (rdc_stats->rcrfull < error_disp_cnt) {
3032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3033 "==> nxge_rx_err_evnts(channel %d): "
3034 "fatal error: rcrfull error", channel));
3035 }
3036 }
3037 if (cs.bits.hdw.rbr_empty) {
3038 		/*
3039 		 * This bit is informational; there is no need to
3040 		 * send an FMA ereport or log a message.
3041 		 */
3042 rdc_stats->rbr_empty++;
3043 }
3044 if (cs.bits.hdw.rbrfull) {
3045 rdc_stats->rbrfull++;
3046 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3047 NXGE_FM_EREPORT_RDMC_RBRFULL);
3048 rxchan_fatal = B_TRUE;
3049 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3050 "==> nxge_rx_err_evnts(channel %d): "
3051 "fatal error: rbr_full error", channel));
3052 }
3053 if (cs.bits.hdw.rbrlogpage) {
3054 rdc_stats->rbrlogpage++;
3055 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3056 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
3057 rxchan_fatal = B_TRUE;
3058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3059 "==> nxge_rx_err_evnts(channel %d): "
3060 "fatal error: rbr logical page error", channel));
3061 }
3062 if (cs.bits.hdw.cfiglogpage) {
3063 rdc_stats->cfiglogpage++;
3064 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
3065 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
3066 rxchan_fatal = B_TRUE;
3067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3068 "==> nxge_rx_err_evnts(channel %d): "
3069 "fatal error: cfig logical page error", channel));
3070 }
3071
3072 if (rxport_fatal) {
3073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3074 " nxge_rx_err_evnts: fatal error on Port #%d\n",
3075 portn));
3076 if (isLDOMguest(nxgep)) {
3077 status = NXGE_ERROR;
3078 } else {
3079 status = nxge_ipp_fatal_err_recover(nxgep);
3080 if (status == NXGE_OK) {
3081 FM_SERVICE_RESTORED(nxgep);
3082 }
3083 }
3084 }
3085
3086 if (rxchan_fatal) {
3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3088 " nxge_rx_err_evnts: fatal error on Channel #%d\n",
3089 channel));
3090 if (isLDOMguest(nxgep)) {
3091 status = NXGE_ERROR;
3092 } else {
3093 status = nxge_rxdma_fatal_err_recover(nxgep, channel);
3094 if (status == NXGE_OK) {
3095 FM_SERVICE_RESTORED(nxgep);
3096 }
3097 }
3098 }
3099
3100 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
3101
3102 return (status);
3103 }
3104
3105 /*
3106 * nxge_rdc_hvio_setup
3107 *
3108  * This code sets up the Hypervisor (HV) variables for this channel: it
3108  * records the real I/O addresses and sizes of the channel's data buffer
3108  * and control areas for later logical-page configuration.
3109 *
3110 * Arguments:
3111 * nxgep
3112 * channel
3113 *
3114 * Notes:
3115 * What does NIU_LP_WORKAROUND mean?
3116 *
3117 * NPI/NXGE function calls:
3118 * na
3119 *
3120 * Context:
3121 * Any domain
3122 */
3123 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3124 static void
3125 nxge_rdc_hvio_setup(
3126 nxge_t *nxgep, int channel)
3127 {
3128 nxge_dma_common_t *dma_common;
3129 nxge_dma_common_t *dma_control;
3130 rx_rbr_ring_t *ring;
3131
3132 ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3133 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3134
3135 ring->hv_set = B_FALSE;
3136
3137 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
3138 dma_common->orig_ioaddr_pp;
3139 ring->hv_rx_buf_ioaddr_size = (uint64_t)
3140 dma_common->orig_alength;
3141
3142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3143 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
3144 channel, ring->hv_rx_buf_base_ioaddr_pp,
3145 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
3146 dma_common->orig_alength, dma_common->orig_alength));
3147
3148 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3149
3150 ring->hv_rx_cntl_base_ioaddr_pp =
3151 (uint64_t)dma_control->orig_ioaddr_pp;
3152 ring->hv_rx_cntl_ioaddr_size =
3153 (uint64_t)dma_control->orig_alength;
3154
3155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
3156 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
3157 channel, ring->hv_rx_cntl_base_ioaddr_pp,
3158 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
3159 dma_control->orig_alength, dma_control->orig_alength));
3160 }
3161 #endif
3162
3163 /*
3164 * nxge_map_rxdma
3165 *
3166 * Map an RDC into our kernel space.
3167 *
3168 * Arguments:
3169 * nxgep
3170 * channel The channel to map.
3171 *
3172 * Notes:
3173 * 1. Allocate & initialise a memory pool, if necessary.
3174 * 2. Allocate however many receive buffers are required.
3175 * 3. Setup buffers, descriptors, and mailbox.
3176 *
3177 * NPI/NXGE function calls:
3178 * nxge_alloc_rx_mem_pool()
3179 * nxge_alloc_rbb()
3180 * nxge_map_rxdma_channel()
3181 *
3182 * Registers accessed:
3183 *
3184 * Context:
3185 * Any domain
3186 */
3187 static nxge_status_t
3188 nxge_map_rxdma(p_nxge_t nxgep, int channel)
3189 {
3190 nxge_dma_common_t **data;
3191 nxge_dma_common_t **control;
3192 rx_rbr_ring_t **rbr_ring;
3193 rx_rcr_ring_t **rcr_ring;
3194 rx_mbox_t **mailbox;
3195 uint32_t chunks;
3196
3197 nxge_status_t status;
3198
3199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
3200
3201 if (!nxgep->rx_buf_pool_p) {
3202 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
3203 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3204 "<== nxge_map_rxdma: buf not allocated"));
3205 return (NXGE_ERROR);
3206 }
3207 }
3208
3209 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3210 return (NXGE_ERROR);
3211
3212 /*
3213 	 * Map descriptors from the buffer pools for each DMA channel.
3214 */
3215
3216 /*
3217 * Set up and prepare buffer blocks, descriptors
3218 * and mailbox.
3219 */
3220 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3221 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3222 chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3223
3224 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3225 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3226
3227 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3228
3229 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3230 chunks, control, rcr_ring, mailbox);
3231 if (status != NXGE_OK) {
3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3233 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3234 "returned 0x%x",
3235 channel, status));
3236 return (status);
3237 }
3238 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3239 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3240 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3241 &nxgep->statsp->rdc_stats[channel];
3242
3243 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3244 if (!isLDOMguest(nxgep))
3245 nxge_rdc_hvio_setup(nxgep, channel);
3246 #endif
3247
3248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3249 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
3250
3251 return (status);
3252 }
3253
3254 static void
3255 nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
3256 {
3257 rx_rbr_ring_t *rbr_ring;
3258 rx_rcr_ring_t *rcr_ring;
3259 rx_mbox_t *mailbox;
3260
3261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
3262
3263 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
3264 !nxgep->rx_mbox_areas_p)
3265 return;
3266
3267 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
3268 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
3269 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3270
3271 if (!rbr_ring || !rcr_ring || !mailbox)
3272 return;
3273
3274 (void) nxge_unmap_rxdma_channel(
3275 nxgep, channel, rbr_ring, rcr_ring, mailbox);
3276
3277 nxge_free_rxb(nxgep, channel);
3278
3279 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
3280 }
3281
3282 nxge_status_t
3283 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
3284 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
3285 uint32_t num_chunks,
3286 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
3287 p_rx_mbox_t *rx_mbox_p)
3288 {
3289 int status = NXGE_OK;
3290
3291 /*
3292 * Set up and prepare buffer blocks, descriptors
3293 * and mailbox.
3294 */
3295 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3296 "==> nxge_map_rxdma_channel (channel %d)", channel));
3297 /*
3298 * Receive buffer blocks
3299 */
3300 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
3301 dma_buf_p, rbr_p, num_chunks);
3302 if (status != NXGE_OK) {
3303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3304 "==> nxge_map_rxdma_channel (channel %d): "
3305 "map buffer failed 0x%x", channel, status));
3306 goto nxge_map_rxdma_channel_exit;
3307 }
3308
3309 /*
3310 * Receive block ring, completion ring and mailbox.
3311 */
3312 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
3313 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
3314 if (status != NXGE_OK) {
3315 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3316 "==> nxge_map_rxdma_channel (channel %d): "
3317 "map config failed 0x%x", channel, status));
3318 goto nxge_map_rxdma_channel_fail2;
3319 }
3320
3321 goto nxge_map_rxdma_channel_exit;
3322
3323 nxge_map_rxdma_channel_fail3:
3324 /* Free rbr, rcr */
3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3326 "==> nxge_map_rxdma_channel: free rbr/rcr "
3327 "(status 0x%x channel %d)",
3328 status, channel));
3329 nxge_unmap_rxdma_channel_cfg_ring(nxgep,
3330 *rcr_p, *rx_mbox_p);
3331
3332 nxge_map_rxdma_channel_fail2:
3333 /* Free buffer blocks */
3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3335 "==> nxge_map_rxdma_channel: free rx buffers"
3336 "(nxgep 0x%x status 0x%x channel %d)",
3337 nxgep, status, channel));
3338 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
3339
3340 status = NXGE_ERROR;
3341
3342 nxge_map_rxdma_channel_exit:
3343 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3344 "<== nxge_map_rxdma_channel: "
3345 "(nxgep 0x%x status 0x%x channel %d)",
3346 nxgep, status, channel));
3347