nxge_rxdma.c (e11f0814) nxge_rxdma.c (da14cebe)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 25 unchanged lines hidden (view full) ---

34#include <npi_rx_wr64.h>
35
36#define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
38#define NXGE_ACTUAL_RDC(nxgep, rdc) \
39 (rdc + nxgep->pt_config.hw_config.start_rdc)
40
41/*
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 25 unchanged lines hidden (view full) ---

34#include <npi_rx_wr64.h>
35
36#define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
38#define NXGE_ACTUAL_RDC(nxgep, rdc) \
39 (rdc + nxgep->pt_config.hw_config.start_rdc)
40
41/*
42 * XXX: This is a tunable to limit the number of packets each interrupt
43 * handles. 0 (default) means that each interrupt takes as much packets
44 * as it finds.
45 */
46extern int nxge_max_intr_pkts;
47
48/*
42 * Globals: tunable parameters (/etc/system or adb)
43 *
44 */
45extern uint32_t nxge_rbr_size;
46extern uint32_t nxge_rcr_size;
47extern uint32_t nxge_rbr_spare_size;
48
49extern uint32_t nxge_mblks_pending;

--- 60 unchanged lines hidden (view full) ---

110 p_rcr_entry_t,
111 boolean_t *,
112 mblk_t **, mblk_t **);
113
114nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
115
116static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
117static void nxge_freeb(p_rx_msg_t);
49 * Globals: tunable parameters (/etc/system or adb)
50 *
51 */
52extern uint32_t nxge_rbr_size;
53extern uint32_t nxge_rcr_size;
54extern uint32_t nxge_rbr_spare_size;
55
56extern uint32_t nxge_mblks_pending;

--- 60 unchanged lines hidden (view full) ---

117 p_rcr_entry_t,
118 boolean_t *,
119 mblk_t **, mblk_t **);
120
121nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
122
123static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
124static void nxge_freeb(p_rx_msg_t);
118static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
125static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
119static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
120
121static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
122 uint32_t, uint32_t);
123
124static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
125 p_rx_rbr_ring_t);
126

--- 5 unchanged lines hidden (view full) ---

132nxge_rx_port_fatal_err_recover(p_nxge_t);
133
134static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
135
136nxge_status_t
137nxge_init_rxdma_channels(p_nxge_t nxgep)
138{
139 nxge_grp_set_t *set = &nxgep->rx_set;
126static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
127
128static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
129 uint32_t, uint32_t);
130
131static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
132 p_rx_rbr_ring_t);
133

--- 5 unchanged lines hidden (view full) ---

139nxge_rx_port_fatal_err_recover(p_nxge_t);
140
141static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
142
143nxge_status_t
144nxge_init_rxdma_channels(p_nxge_t nxgep)
145{
146 nxge_grp_set_t *set = &nxgep->rx_set;
140 int i, count, rdc, channel;
147 int i, count, channel;
141 nxge_grp_t *group;
148 nxge_grp_t *group;
149 dc_map_t map;
150 int dev_gindex;
142
143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
144
145 if (!isLDOMguest(nxgep)) {
146 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
147 cmn_err(CE_NOTE, "hw_start_common");
148 return (NXGE_ERROR);
149 }
150 }
151
152 /*
153 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
154 * We only have 8 hardware RDC tables, but we may have
155 * up to 16 logical (software-defined) groups of RDCS,
156 * if we make use of layer 3 & 4 hardware classification.
157 */
158 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
159 if ((1 << i) & set->lg.map) {
160 group = set->group[i];
151
152 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
153
154 if (!isLDOMguest(nxgep)) {
155 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
156 cmn_err(CE_NOTE, "hw_start_common");
157 return (NXGE_ERROR);
158 }
159 }
160
161 /*
162 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
163 * We only have 8 hardware RDC tables, but we may have
164 * up to 16 logical (software-defined) groups of RDCS,
165 * if we make use of layer 3 & 4 hardware classification.
166 */
167 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
168 if ((1 << i) & set->lg.map) {
169 group = set->group[i];
161
170 dev_gindex =
171 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
172 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
162 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
173 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
163 if ((1 << channel) & group->map) {
174 if ((1 << channel) & map) {
164 if ((nxge_grp_dc_add(nxgep,
165 group, VP_BOUND_RX, channel)))
166 goto init_rxdma_channels_exit;
167 }
168 }
169 }
170 if (++count == set->lg.count)
171 break;
172 }
173
174 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
175 return (NXGE_OK);
176
177init_rxdma_channels_exit:
178 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
179 if ((1 << i) & set->lg.map) {
180 group = set->group[i];
175 if ((nxge_grp_dc_add(nxgep,
176 group, VP_BOUND_RX, channel)))
177 goto init_rxdma_channels_exit;
178 }
179 }
180 }
181 if (++count == set->lg.count)
182 break;
183 }
184
185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
186 return (NXGE_OK);
187
188init_rxdma_channels_exit:
189 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
190 if ((1 << i) & set->lg.map) {
191 group = set->group[i];
181
182 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
183 if ((1 << rdc) & group->map) {
192 dev_gindex =
193 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
194 map = nxgep->pt_config.rdc_grps[dev_gindex].map;
195 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
196 if ((1 << channel) & map) {
184 nxge_grp_dc_remove(nxgep,
197 nxge_grp_dc_remove(nxgep,
185 VP_BOUND_RX, rdc);
198 VP_BOUND_RX, channel);
186 }
187 }
188 }
199 }
200 }
201 }
189
190 if (++count == set->lg.count)
191 break;
192 }
193
194 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
195 return (NXGE_ERROR);
196}
197

--- 972 unchanged lines hidden (view full) ---

1170 /* RCR qlen */
1171 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1172 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1173
1174 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1175 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1176}
1177
202 if (++count == set->lg.count)
203 break;
204 }
205
206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
207 return (NXGE_ERROR);
208}
209

--- 972 unchanged lines hidden (view full) ---

1182 /* RCR qlen */
1183 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
1184 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);
1185
1186 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1187 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
1188}
1189
1178void
1179nxge_rxdma_stop(p_nxge_t nxgep)
1180{
1181 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop"));
1182
1183 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1184 (void) nxge_rx_mac_disable(nxgep);
1185 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
1186 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
1187}
1188
1189void
1190nxge_rxdma_stop_reinit(p_nxge_t nxgep)
1191{
1192 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));
1193
1194 (void) nxge_rxdma_stop(nxgep);
1195 (void) nxge_uninit_rxdma_channels(nxgep);
1196 (void) nxge_init_rxdma_channels(nxgep);
1197
1198#ifndef AXIS_DEBUG_LB
1199 (void) nxge_xcvr_init(nxgep);
1200 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
1201#endif
1202 (void) nxge_rx_mac_enable(nxgep);
1203
1204 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
1205}
1206
1207nxge_status_t
1208nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1209{
1210 nxge_grp_set_t *set = &nxgep->rx_set;
1211 nxge_status_t status;
1212 npi_status_t rs;
1213 int rdc;
1214

--- 218 unchanged lines hidden (view full) ---

1433
1434nxge_rxdma_fixup_channel_fail:
1435 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1436 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
1437
1438 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
1439}
1440
1190nxge_status_t
1191nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1192{
1193 nxge_grp_set_t *set = &nxgep->rx_set;
1194 nxge_status_t status;
1195 npi_status_t rs;
1196 int rdc;
1197

--- 218 unchanged lines hidden (view full) ---

1416
1417nxge_rxdma_fixup_channel_fail:
1418 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1419 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
1420
1421 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
1422}
1423
1441/* ARGSUSED */
1424/*
1425 * Convert an absolute RDC number to a Receive Buffer Ring index. That is,
1426 * map <channel> to an index into nxgep->rx_rbr_rings.
1427 * (device ring index -> port ring index)
1428 */
1442int
1443nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
1444{
1429int
1430nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
1431{
1445 return (channel);
1432 int i, ndmas;
1433 uint16_t rdc;
1434 p_rx_rbr_rings_t rx_rbr_rings;
1435 p_rx_rbr_ring_t *rbr_rings;
1436
1437 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1438 "==> nxge_rxdma_get_ring_index: channel %d", channel));
1439
1440 rx_rbr_rings = nxgep->rx_rbr_rings;
1441 if (rx_rbr_rings == NULL) {
1442 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1443 "<== nxge_rxdma_get_ring_index: NULL ring pointer"));
1444 return (-1);
1445 }
1446 ndmas = rx_rbr_rings->ndmas;
1447 if (!ndmas) {
1448 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1449 "<== nxge_rxdma_get_ring_index: no channel"));
1450 return (-1);
1451 }
1452
1453 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1454 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));
1455
1456 rbr_rings = rx_rbr_rings->rbr_rings;
1457 for (i = 0; i < ndmas; i++) {
1458 rdc = rbr_rings[i]->rdc;
1459 if (channel == rdc) {
1460 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1461 "==> nxge_rxdma_get_rbr_ring: channel %d "
1462 "(index %d) ring %d", channel, i, rbr_rings[i]));
1463 return (i);
1464 }
1465 }
1466
1467 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1468 "<== nxge_rxdma_get_rbr_ring_index: not found"));
1469
1470 return (-1);
1446}
1447
1448p_rx_rbr_ring_t
1449nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1450{
1451 nxge_grp_set_t *set = &nxgep->rx_set;
1452 nxge_channel_t rdc;
1453

--- 333 unchanged lines hidden (view full) ---

1787nxge_rx_intr(void *arg1, void *arg2)
1788{
1789 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1790 p_nxge_t nxgep = (p_nxge_t)arg2;
1791 p_nxge_ldg_t ldgp;
1792 uint8_t channel;
1793 npi_handle_t handle;
1794 rx_dma_ctl_stat_t cs;
1471}
1472
1473p_rx_rbr_ring_t
1474nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
1475{
1476 nxge_grp_set_t *set = &nxgep->rx_set;
1477 nxge_channel_t rdc;
1478

--- 333 unchanged lines hidden (view full) ---

1812nxge_rx_intr(void *arg1, void *arg2)
1813{
1814 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1815 p_nxge_t nxgep = (p_nxge_t)arg2;
1816 p_nxge_ldg_t ldgp;
1817 uint8_t channel;
1818 npi_handle_t handle;
1819 rx_dma_ctl_stat_t cs;
1820 p_rx_rcr_ring_t rcr_ring;
1821 mblk_t *mp;
1795
1796#ifdef NXGE_DEBUG
1797 rxdma_cfig1_t cfg;
1798#endif
1822
1823#ifdef NXGE_DEBUG
1824 rxdma_cfig1_t cfg;
1825#endif
1799 uint_t serviced = DDI_INTR_UNCLAIMED;
1800
1801 if (ldvp == NULL) {
1802 NXGE_DEBUG_MSG((NULL, INT_CTL,
1803 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1804 nxgep, ldvp));
1805
1806 return (DDI_INTR_CLAIMED);
1807 }

--- 13 unchanged lines hidden (view full) ---

1821 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1822 nxgep, ldvp));
1823
1824 /*
1825 * This interrupt handler is for a specific
1826 * receive dma channel.
1827 */
1828 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1826
1827 if (ldvp == NULL) {
1828 NXGE_DEBUG_MSG((NULL, INT_CTL,
1829 "<== nxge_rx_intr: arg2 $%p arg1 $%p",
1830 nxgep, ldvp));
1831
1832 return (DDI_INTR_CLAIMED);
1833 }

--- 13 unchanged lines hidden (view full) ---

1847 "==> nxge_rx_intr: arg2 $%p arg1 $%p",
1848 nxgep, ldvp));
1849
1850 /*
1851 * This interrupt handler is for a specific
1852 * receive dma channel.
1853 */
1854 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1855
1856 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
1857
1829 /*
1858 /*
1859 * The RCR ring lock must be held when packets
1860 * are being processed and the hardware registers are
1861 * being read or written to prevent race condition
1862 * among the interrupt thread, the polling thread
1863 * (will cause fatal errors such as rcrincon bit set)
1864 * and the setting of the poll_flag.
1865 */
1866 MUTEX_ENTER(&rcr_ring->lock);
1867
1868 /*
1830 * Get the control and status for this channel.
1831 */
1832 channel = ldvp->channel;
1833 ldgp = ldvp->ldgp;
1869 * Get the control and status for this channel.
1870 */
1871 channel = ldvp->channel;
1872 ldgp = ldvp->ldgp;
1873
1874 if (!isLDOMguest(nxgep)) {
1875 if (!nxgep->rx_channel_started[channel]) {
1876 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1877 "<== nxge_rx_intr: channel is not started"));
1878 MUTEX_EXIT(&rcr_ring->lock);
1879 return (DDI_INTR_CLAIMED);
1880 }
1881 }
1882
1883 ASSERT(rcr_ring->ldgp == ldgp);
1884 ASSERT(rcr_ring->ldvp == ldvp);
1885
1834 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1835
1836 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1837 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1838 channel,
1839 cs.value,
1840 cs.bits.hdw.rcrto,
1841 cs.bits.hdw.rcrthres));
1842
1886 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);
1887
1888 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
1889 "cs 0x%016llx rcrto 0x%x rcrthres %x",
1890 channel,
1891 cs.value,
1892 cs.bits.hdw.rcrto,
1893 cs.bits.hdw.rcrthres));
1894
1843 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);
1844 serviced = DDI_INTR_CLAIMED;
1895 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs);
1845
1846 /* error events. */
1847 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1848 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1849 }
1850
1896
1897 /* error events. */
1898 if (cs.value & RX_DMA_CTL_STAT_ERROR) {
1899 (void) nxge_rx_err_evnts(nxgep, channel, cs);
1900 }
1901
1851nxge_intr_exit:
1852 /*
1853 * Enable the mailbox update interrupt if we want
1854 * to use mailbox. We probably don't need to use
1855 * mailbox as it only saves us one pio read.
1856 * Also write 1 to rcrthres and rcrto to clear
1857 * these two edge triggered bits.
1858 */
1902 /*
1903 * Enable the mailbox update interrupt if we want
1904 * to use mailbox. We probably don't need to use
1905 * mailbox as it only saves us one pio read.
1906 * Also write 1 to rcrthres and rcrto to clear
1907 * these two edge triggered bits.
1908 */
1859
1860 cs.value &= RX_DMA_CTL_STAT_WR1C;
1909 cs.value &= RX_DMA_CTL_STAT_WR1C;
1861 cs.bits.hdw.mex = 1;
1910 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1;
1862 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1863 cs.value);
1864
1865 /*
1911 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
1912 cs.value);
1913
1914 /*
1866 * Rearm this logical group if this is a single device
1867 * group.
1915 * If the polling mode is enabled, disable the interrupt.
1868 */
1916 */
1869 if (ldgp->nldvs == 1) {
1870 ldgimgm_t mgm;
1871 mgm.value = 0;
1872 mgm.bits.ldw.arm = 1;
1873 mgm.bits.ldw.timer = ldgp->ldg_timer;
1874 if (isLDOMguest(nxgep)) {
1875 nxge_hio_ldgimgn(nxgep, ldgp);
1876 } else {
1917 if (rcr_ring->poll_flag) {
1918 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1919 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
1920 "(disabling interrupts)", channel, ldgp, ldvp));
1921 /*
1922 * Disarm this logical group if this is a single device
1923 * group.
1924 */
1925 if (ldgp->nldvs == 1) {
1926 ldgimgm_t mgm;
1927 mgm.value = 0;
1928 mgm.bits.ldw.arm = 0;
1877 NXGE_REG_WR64(handle,
1929 NXGE_REG_WR64(handle,
1878 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1879 mgm.value);
1930 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
1880 }
1931 }
1932 } else {
1933 /*
1934 * Rearm this logical group if this is a single device group.
1935 */
1936 if (ldgp->nldvs == 1) {
1937 if (isLDOMguest(nxgep)) {
1938 nxge_hio_ldgimgn(nxgep, ldgp);
1939 } else {
1940 ldgimgm_t mgm;
1941
1942 mgm.value = 0;
1943 mgm.bits.ldw.arm = 1;
1944 mgm.bits.ldw.timer = ldgp->ldg_timer;
1945
1946 NXGE_REG_WR64(handle,
1947 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
1948 mgm.value);
1949 }
1950 }
1951
1952 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1953 "==> nxge_rx_intr: rdc %d ldgp $%p "
1954 "exiting ISR (and call mac_rx_ring)", channel, ldgp));
1881 }
1955 }
1956 MUTEX_EXIT(&rcr_ring->lock);
1882
1957
1883 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
1884 serviced));
1885 return (serviced);
1958 if (mp) {
1959 if (!isLDOMguest(nxgep))
1960 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp,
1961 rcr_ring->rcr_gen_num);
1962#if defined(sun4v)
1963 else { /* isLDOMguest(nxgep) */
1964 nxge_hio_data_t *nhd = (nxge_hio_data_t *)
1965 nxgep->nxge_hw_p->hio;
1966 nx_vio_fp_t *vio = &nhd->hio.vio;
1967
1968 if (vio->cb.vio_net_rx_cb) {
1969 (*vio->cb.vio_net_rx_cb)
1970 (nxgep->hio_vr->vhp, mp);
1971 }
1972 }
1973#endif
1974 }
1975 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
1976 return (DDI_INTR_CLAIMED);
1886}
1887
1888/*
1889 * Process the packets received in the specified logical device
1890 * and pass up a chain of message blocks to the upper layer.
1977}
1978
1979/*
1980 * Process the packets received in the specified logical device
1981 * and pass up a chain of message blocks to the upper layer.
1982 * The RCR ring lock must be held before calling this function.
1891 */
1983 */
1892static void
1984static mblk_t *
1893nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
1894{
1895 p_mblk_t mp;
1896 p_rx_rcr_ring_t rcrp;
1897
1898 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
1899 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];
1985nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
1986{
1987 p_mblk_t mp;
1988 p_rx_rcr_ring_t rcrp;
1989
1990 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
1991 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];
1900 if (rcrp->poll_flag) {
1901 /* It is in the poll mode */
1902 return;
1903 }
1904
1992
1993 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
1994 "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d "
1995 "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle));
1905 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
1906 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1907 "<== nxge_rx_pkts_vring: no mp"));
1996 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
1997 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1998 "<== nxge_rx_pkts_vring: no mp"));
1908 return;
1999 return (NULL);
1909 }
1910
1911 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
1912 mp));
1913
1914#ifdef NXGE_DEBUG
1915 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1916 "==> nxge_rx_pkts_vring:calling mac_rx "

--- 25 unchanged lines hidden (view full) ---

1942 NXGE_DEBUG_MSG((nxgep, RX_CTL,
1943 "==> nxge_rx_pkts_vring: dump next packets "
1944 "(b_rptr $%p): %s",
1945 mp->b_next->b_rptr,
1946 nxge_dump_packet((char *)mp->b_next->b_rptr,
1947 mp->b_next->b_wptr - mp->b_next->b_rptr)));
1948 }
1949#endif
2000 }
2001
2002 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
2003 mp));
2004
2005#ifdef NXGE_DEBUG
2006 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2007 "==> nxge_rx_pkts_vring:calling mac_rx "

--- 25 unchanged lines hidden (view full) ---

2033 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2034 "==> nxge_rx_pkts_vring: dump next packets "
2035 "(b_rptr $%p): %s",
2036 mp->b_next->b_rptr,
2037 nxge_dump_packet((char *)mp->b_next->b_rptr,
2038 mp->b_next->b_wptr - mp->b_next->b_rptr)));
2039 }
2040#endif
2041 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2042 "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ",
2043 rcrp->rdc, rcrp->rcr_mac_handle));
1950
2044
1951 if (!isLDOMguest(nxgep))
1952 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
1953#if defined(sun4v)
1954 else { /* isLDOMguest(nxgep) */
1955 nxge_hio_data_t *nhd = (nxge_hio_data_t *)
1956 nxgep->nxge_hw_p->hio;
1957 nx_vio_fp_t *vio = &nhd->hio.vio;
1958
1959 if (vio->cb.vio_net_rx_cb) {
1960 (*vio->cb.vio_net_rx_cb)
1961 (nxgep->hio_vr->vhp, mp);
1962 }
1963 }
1964#endif
2045 return (mp);
1965}
1966
1967
1968/*
1969 * This routine is the main packet receive processing function.
1970 * It gets the packet type, error code, and buffer related
1971 * information from the receive completion entry.
1972 * How many completion entries to process is based on the number of packets
1973 * queued by the hardware, a hardware maintained tail pointer
1974 * and a configurable receive packet count.
1975 *
1976 * A chain of message blocks will be created as result of processing
1977 * the completion entries. This chain of message blocks will be returned and
1978 * a hardware control status register will be updated with the number of
1979 * packets were removed from the hardware queue.
1980 *
2046}
2047
2048
2049/*
2050 * This routine is the main packet receive processing function.
2051 * It gets the packet type, error code, and buffer related
2052 * information from the receive completion entry.
2053 * How many completion entries to process is based on the number of packets
2054 * queued by the hardware, a hardware maintained tail pointer
2055 * and a configurable receive packet count.
2056 *
2057 * A chain of message blocks will be created as result of processing
2058 * the completion entries. This chain of message blocks will be returned and
2059 * a hardware control status register will be updated with the number of
2060 * packets were removed from the hardware queue.
2061 *
2062 * The RCR ring lock is held when entering this function.
1981 */
1982static mblk_t *
1983nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
1984 int bytes_to_pickup)
1985{
1986 npi_handle_t handle;
1987 uint8_t channel;
1988 uint32_t comp_rd_index;

--- 4 unchanged lines hidden (view full) ---

1993 uint32_t qlen_hw;
1994 boolean_t multi;
1995 rcrcfig_b_t rcr_cfg_b;
1996 int totallen = 0;
1997#if defined(_BIG_ENDIAN)
1998 npi_status_t rs = NPI_SUCCESS;
1999#endif
2000
2063 */
2064static mblk_t *
2065nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
2066 int bytes_to_pickup)
2067{
2068 npi_handle_t handle;
2069 uint8_t channel;
2070 uint32_t comp_rd_index;

--- 4 unchanged lines hidden (view full) ---

2075 uint32_t qlen_hw;
2076 boolean_t multi;
2077 rcrcfig_b_t rcr_cfg_b;
2078 int totallen = 0;
2079#if defined(_BIG_ENDIAN)
2080 npi_status_t rs = NPI_SUCCESS;
2081#endif
2082
2001 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
2083 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
2002 "channel %d", rcr_p->rdc));
2003
2004 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
2005 return (NULL);
2006 }
2007 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2008 channel = rcr_p->rdc;
2009

--- 17 unchanged lines hidden (view full) ---

2027 }
2028#endif
2029 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
2030 "qlen %d", channel, qlen));
2031
2032
2033
2034 if (!qlen) {
2084 "channel %d", rcr_p->rdc));
2085
2086 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
2087 return (NULL);
2088 }
2089 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2090 channel = rcr_p->rdc;
2091

--- 17 unchanged lines hidden (view full) ---

2109 }
2110#endif
2111 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
2112 "qlen %d", channel, qlen));
2113
2114
2115
2116 if (!qlen) {
2035 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2117 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2036 "==> nxge_rx_pkts:rcr channel %d "
2037 "qlen %d (no pkts)", channel, qlen));
2038
2039 return (NULL);
2040 }
2041
2042 comp_rd_index = rcr_p->comp_rd_index;
2043

--- 91 unchanged lines hidden (view full) ---

2135 multi,
2136 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2137 comp_rd_index));
2138
2139 if ((bytes_to_pickup != -1) &&
2140 (totallen >= bytes_to_pickup)) {
2141 break;
2142 }
2118 "==> nxge_rx_pkts:rcr channel %d "
2119 "qlen %d (no pkts)", channel, qlen));
2120
2121 return (NULL);
2122 }
2123
2124 comp_rd_index = rcr_p->comp_rd_index;
2125

--- 91 unchanged lines hidden (view full) ---

2217 multi,
2218 nrcr_read, npkt_read, rcr_desc_rd_head_pp,
2219 comp_rd_index));
2220
2221 if ((bytes_to_pickup != -1) &&
2222 (totallen >= bytes_to_pickup)) {
2223 break;
2224 }
2225
2226 /* limit the number of packets for interrupt */
2227 if (!(rcr_p->poll_flag)) {
2228 if (npkt_read == nxge_max_intr_pkts) {
2229 break;
2230 }
2231 }
2143 }
2144
2145 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2146 rcr_p->comp_rd_index = comp_rd_index;
2147 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2148
2149 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2150 (nxgep->intr_threshold != rcr_p->intr_threshold)) {

--- 18 unchanged lines hidden (view full) ---

2169 channel,
2170 rcr_p->rcr_desc_rd_head_pp,
2171 rcr_p->comp_rd_index));
2172 /*
2173 * Update RCR buffer pointer read and number of packets
2174 * read.
2175 */
2176
2232 }
2233
2234 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
2235 rcr_p->comp_rd_index = comp_rd_index;
2236 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
2237
2238 if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
2239 (nxgep->intr_threshold != rcr_p->intr_threshold)) {

--- 18 unchanged lines hidden (view full) ---

2258 channel,
2259 rcr_p->rcr_desc_rd_head_pp,
2260 rcr_p->comp_rd_index));
2261 /*
2262 * Update RCR buffer pointer read and number of packets
2263 * read.
2264 */
2265
2177 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
2266 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
2267 "channel %d", rcr_p->rdc));
2268
2178 return (head_mp);
2179}
2180
2181void
2182nxge_receive_packet(p_nxge_t nxgep,
2183 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2184 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2185{

--- 89 unchanged lines hidden (view full) ---

2275 if (!l2_len) {
2276
2277 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2278 "<== nxge_receive_packet: failed: l2 length is 0."));
2279 return;
2280 }
2281
2282 /*
2269 return (head_mp);
2270}
2271
2272void
2273nxge_receive_packet(p_nxge_t nxgep,
2274 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
2275 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
2276{

--- 89 unchanged lines hidden (view full) ---

2366 if (!l2_len) {
2367
2368 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2369 "<== nxge_receive_packet: failed: l2 length is 0."));
2370 return;
2371 }
2372
2373 /*
2283 * Sofware workaround for BMAC hardware limitation that allows
2374 * Software workaround for BMAC hardware limitation that allows
2284 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2285 * instead of 0x2400 for jumbo.
2286 */
2287 if (l2_len > nxgep->mac.maxframesize) {
2288 pkt_too_long_err = B_TRUE;
2289 }
2290
2291 /* Hardware sends us 4 bytes of CRC as no stripping is done. */

--- 21 unchanged lines hidden (view full) ---

2313
2314 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2315 "==> nxge_receive_packet: first entry 0x%016llx "
2316 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2317 rcr_entry, pkt_buf_addr_pp, l2_len,
2318 hdr_size));
2319 }
2320
2375 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
2376 * instead of 0x2400 for jumbo.
2377 */
2378 if (l2_len > nxgep->mac.maxframesize) {
2379 pkt_too_long_err = B_TRUE;
2380 }
2381
2382 /* Hardware sends us 4 bytes of CRC as no stripping is done. */

--- 21 unchanged lines hidden (view full) ---

2404
2405 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2406 "==> nxge_receive_packet: first entry 0x%016llx "
2407 "pkt_buf_addr_pp $%p l2_len %d hdr %d",
2408 rcr_entry, pkt_buf_addr_pp, l2_len,
2409 hdr_size));
2410 }
2411
2321 MUTEX_ENTER(&rcr_p->lock);
2322 MUTEX_ENTER(&rx_rbr_p->lock);
2323
2324 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2325 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2326 "full pkt_buf_addr_pp $%p l2_len %d",
2327 rcr_entry, pkt_buf_addr_pp, l2_len));
2328
2329 /*

--- 9 unchanged lines hidden (view full) ---

2339
2340 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2341 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2342 "full pkt_buf_addr_pp $%p l2_len %d",
2343 rcr_entry, pkt_buf_addr_pp, l2_len));
2344
2345 if (status != NXGE_OK) {
2346 MUTEX_EXIT(&rx_rbr_p->lock);
2412 MUTEX_ENTER(&rx_rbr_p->lock);
2413
2414 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2415 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
2416 "full pkt_buf_addr_pp $%p l2_len %d",
2417 rcr_entry, pkt_buf_addr_pp, l2_len));
2418
2419 /*

--- 9 unchanged lines hidden (view full) ---

2429
2430 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2431 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
2432 "full pkt_buf_addr_pp $%p l2_len %d",
2433 rcr_entry, pkt_buf_addr_pp, l2_len));
2434
2435 if (status != NXGE_OK) {
2436 MUTEX_EXIT(&rx_rbr_p->lock);
2347 MUTEX_EXIT(&rcr_p->lock);
2348 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2349 "<== nxge_receive_packet: found vaddr failed %d",
2350 status));
2351 return;
2352 }
2353
2354 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2355 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "

--- 31 unchanged lines hidden (view full) ---

2387 case RCR_SINGLE_BLOCK:
2388 bsize = rx_msg_p->block_size;
2389 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2390 "==> nxge_receive_packet: single %d", bsize));
2391
2392 break;
2393 default:
2394 MUTEX_EXIT(&rx_rbr_p->lock);
2437 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2438 "<== nxge_receive_packet: found vaddr failed %d",
2439 status));
2440 return;
2441 }
2442
2443 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2444 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "

--- 31 unchanged lines hidden (view full) ---

2476 case RCR_SINGLE_BLOCK:
2477 bsize = rx_msg_p->block_size;
2478 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2479 "==> nxge_receive_packet: single %d", bsize));
2480
2481 break;
2482 default:
2483 MUTEX_EXIT(&rx_rbr_p->lock);
2395 MUTEX_EXIT(&rcr_p->lock);
2396 return;
2397 }
2398
2399 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2400 (buf_offset + sw_offset_bytes),
2401 (hdr_size + l2_len),
2402 DDI_DMA_SYNC_FORCPU);
2403

--- 149 unchanged lines hidden (view full) ---

2553 */
2554 if (error_send_up == B_FALSE) {
2555 atomic_inc_32(&rx_msg_p->ref_cnt);
2556 if (buffer_free == B_TRUE) {
2557 rx_msg_p->free = B_TRUE;
2558 }
2559
2560 MUTEX_EXIT(&rx_rbr_p->lock);
2484 return;
2485 }
2486
2487 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
2488 (buf_offset + sw_offset_bytes),
2489 (hdr_size + l2_len),
2490 DDI_DMA_SYNC_FORCPU);
2491

--- 149 unchanged lines hidden (view full) ---

2641 */
2642 if (error_send_up == B_FALSE) {
2643 atomic_inc_32(&rx_msg_p->ref_cnt);
2644 if (buffer_free == B_TRUE) {
2645 rx_msg_p->free = B_TRUE;
2646 }
2647
2648 MUTEX_EXIT(&rx_rbr_p->lock);
2561 MUTEX_EXIT(&rcr_p->lock);
2562 nxge_freeb(rx_msg_p);
2563 return;
2564 }
2565 }
2566
2567 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2568 "==> nxge_receive_packet: DMA sync second "));
2569

--- 68 unchanged lines hidden (view full) ---

2638 } else {
2639 cmn_err(CE_WARN, "!nxge_receive_packet: "
2640 "update stats (error)");
2641 atomic_inc_32(&rx_msg_p->ref_cnt);
2642 if (buffer_free == B_TRUE) {
2643 rx_msg_p->free = B_TRUE;
2644 }
2645 MUTEX_EXIT(&rx_rbr_p->lock);
2649 nxge_freeb(rx_msg_p);
2650 return;
2651 }
2652 }
2653
2654 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2655 "==> nxge_receive_packet: DMA sync second "));
2656

--- 68 unchanged lines hidden (view full) ---

2725 } else {
2726 cmn_err(CE_WARN, "!nxge_receive_packet: "
2727 "update stats (error)");
2728 atomic_inc_32(&rx_msg_p->ref_cnt);
2729 if (buffer_free == B_TRUE) {
2730 rx_msg_p->free = B_TRUE;
2731 }
2732 MUTEX_EXIT(&rx_rbr_p->lock);
2646 MUTEX_EXIT(&rcr_p->lock);
2647 nxge_freeb(rx_msg_p);
2648 return;
2649 }
2650
2651 if (buffer_free == B_TRUE) {
2652 rx_msg_p->free = B_TRUE;
2653 }
2654
2655 is_valid = (nmp != NULL);
2656
2657 rcr_p->rcvd_pkt_bytes = bytes_read;
2658
2659 MUTEX_EXIT(&rx_rbr_p->lock);
2733 nxge_freeb(rx_msg_p);
2734 return;
2735 }
2736
2737 if (buffer_free == B_TRUE) {
2738 rx_msg_p->free = B_TRUE;
2739 }
2740
2741 is_valid = (nmp != NULL);
2742
2743 rcr_p->rcvd_pkt_bytes = bytes_read;
2744
2745 MUTEX_EXIT(&rx_rbr_p->lock);
2660 MUTEX_EXIT(&rcr_p->lock);
2661
2662 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2663 atomic_inc_32(&rx_msg_p->ref_cnt);
2664 nxge_freeb(rx_msg_p);
2665 }
2666
2667 if (is_valid) {
2668 nmp->b_cont = NULL;

--- 8 unchanged lines hidden (view full) ---

2677 /*
2678 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2679 * If a packet is not fragmented and no error bit is set, then
2680 * L4 checksum is OK.
2681 */
2682
2683 if (is_valid && !multi) {
2684 /*
2746
2747 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2748 atomic_inc_32(&rx_msg_p->ref_cnt);
2749 nxge_freeb(rx_msg_p);
2750 }
2751
2752 if (is_valid) {
2753 nmp->b_cont = NULL;

--- 8 unchanged lines hidden (view full) ---

2762 /*
2763 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2764 * If a packet is not fragmented and no error bit is set, then
2765 * L4 checksum is OK.
2766 */
2767
2768 if (is_valid && !multi) {
2769 /*
2685 * Update hardware checksuming.
2686 *
2687 * If the checksum flag nxge_chksum_offload
2688 * is 1, TCP and UDP packets can be sent
2689 * up with good checksum. If the checksum flag
2690 * is set to 0, checksum reporting will apply to
2691 * TCP packets only (workaround for a hardware bug).
2692 * If the checksum flag nxge_cksum_offload is
2693 * greater than 1, both TCP and UDP packets
2694 * will not be reported its hardware checksum results.

--- 27 unchanged lines hidden (view full) ---

2722 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2723
2724 *multi_p = (multi == RCR_MULTI_MASK);
2725 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2726 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2727 *multi_p, nmp, *mp, *mp_cont));
2728}
2729
2770 * If the checksum flag nxge_chksum_offload
2771 * is 1, TCP and UDP packets can be sent
2772 * up with good checksum. If the checksum flag
2773 * is set to 0, checksum reporting will apply to
2774 * TCP packets only (workaround for a hardware bug).
2775 * If the checksum flag nxge_cksum_offload is
2776 * greater than 1, both TCP and UDP packets
2777 * will not be reported its hardware checksum results.

--- 27 unchanged lines hidden (view full) ---

2805 "==> nxge_receive_packet: *mp 0x%016llx", *mp));
2806
2807 *multi_p = (multi == RCR_MULTI_MASK);
2808 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
2809 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2810 *multi_p, nmp, *mp, *mp_cont));
2811}
2812
2813/*
2814 * Enable polling for a ring. Interrupt for the ring is disabled when
2815 * the nxge interrupt comes (see nxge_rx_intr).
2816 */
2817int
2818nxge_enable_poll(void *arg)
2819{
2820 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2821 p_rx_rcr_ring_t ringp;
2822 p_nxge_t nxgep;
2823 p_nxge_ldg_t ldgp;
2824 uint32_t channel;
2825
2826 if (ring_handle == NULL) {
2827 return (0);
2828 }
2829
2830 nxgep = ring_handle->nxgep;
2831 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2832 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2833 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2834 "==> nxge_enable_poll: rdc %d ", ringp->rdc));
2835 ldgp = ringp->ldgp;
2836 if (ldgp == NULL) {
2837 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2838 "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
2839 ringp->rdc));
2840 return (0);
2841 }
2842
2843 MUTEX_ENTER(&ringp->lock);
2844 /* enable polling */
2845 if (ringp->poll_flag == 0) {
2846 ringp->poll_flag = 1;
2847 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2848 "==> nxge_enable_poll: rdc %d set poll flag to 1",
2849 ringp->rdc));
2850 }
2851
2852 MUTEX_EXIT(&ringp->lock);
2853 return (0);
2854}
2855/*
2856 * Disable polling for a ring and enable its interrupt.
2857 */
2858int
2859nxge_disable_poll(void *arg)
2860{
2861 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2862 p_rx_rcr_ring_t ringp;
2863 p_nxge_t nxgep;
2864 uint32_t channel;
2865
2866 if (ring_handle == NULL) {
2867 return (0);
2868 }
2869
2870 nxgep = ring_handle->nxgep;
2871 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2872 ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2873
2874 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2875 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));
2876
2877 MUTEX_ENTER(&ringp->lock);
2878
2879 /* disable polling: enable interrupt */
2880 if (ringp->poll_flag) {
2881 npi_handle_t handle;
2882 rx_dma_ctl_stat_t cs;
2883 uint8_t channel;
2884 p_nxge_ldg_t ldgp;
2885
2886 /*
2887 * Get the control and status for this channel.
2888 */
2889 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2890 channel = ringp->rdc;
2891 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2892 channel, &cs.value);
2893
2894 /*
2895 * Enable mailbox update
2896 * Since packets were not read and the hardware uses
2897 * bits pktread and ptrread to update the queue
2898 * length, we need to set both bits to 0.
2899 */
2900 cs.bits.ldw.pktread = 0;
2901 cs.bits.ldw.ptrread = 0;
2902 cs.bits.hdw.mex = 1;
2903 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2904 cs.value);
2905
2906 /*
2907 * Rearm this logical group if this is a single device
2908 * group.
2909 */
2910 ldgp = ringp->ldgp;
2911 if (ldgp == NULL) {
2912 ringp->poll_flag = 0;
2913 MUTEX_EXIT(&ringp->lock);
2914 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2915 "==> nxge_disable_poll: no ldgp rdc %d "
2916 "(still set poll to 0", ringp->rdc));
2917 return (0);
2918 }
2919 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2920 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
2921 ringp->rdc, ldgp));
2922 if (ldgp->nldvs == 1) {
2923 ldgimgm_t mgm;
2924 mgm.value = 0;
2925 mgm.bits.ldw.arm = 1;
2926 mgm.bits.ldw.timer = ldgp->ldg_timer;
2927 NXGE_REG_WR64(handle,
2928 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
2929 }
2930 ringp->poll_flag = 0;
2931 }
2932
2933 MUTEX_EXIT(&ringp->lock);
2934 return (0);
2935}
2936
/*
 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
 *
 * Called only while the ring is in polling mode (poll_flag == 1, see
 * nxge_enable_poll); the ASSERT below enforces that. Reads the channel
 * control/status register under the ring lock, drains up to
 * 'bytes_to_pickup' bytes of packets via nxge_rx_pkts(), and dispatches
 * any error events reported in the status word. Returns the mblk chain
 * of received packets (NULL if none).
 */
mblk_t *
nxge_rx_poll(void *arg, int bytes_to_pickup)
{
	p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t rcr_p;
	p_nxge_t nxgep;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;
	mblk_t *mblk;
	p_nxge_ldv_t ldvp;
	uint32_t channel;

	nxgep = ring_handle->nxgep;

	/*
	 * Get the control and status for this channel.
	 */
	/* Map the ring handle index to the hardware RDC channel. */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
	MUTEX_ENTER(&rcr_p->lock);
	ASSERT(rcr_p->poll_flag == 1);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
	    rcr_p->rdc, rcr_p->poll_flag));
	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);

	/* error events. */
	/* ldvp may be NULL if no logical device is bound to this ring. */
	ldvp = rcr_p->ldvp;
	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
	}

	MUTEX_EXIT(&rcr_p->lock);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
	return (mblk);
}
2982
2983
2730/*ARGSUSED*/
2731static nxge_status_t
2732nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2733{
2734 p_nxge_rx_ring_stats_t rdc_stats;
2735 npi_handle_t handle;
2736 npi_status_t rs;
2737 boolean_t rxchan_fatal = B_FALSE;

--- 1488 unchanged lines hidden (view full) ---

4226
4227 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4228 "==> nxge_rxdma_stop_channel: control done"));
4229
4230 /*
4231 * Make sure channel is disabled.
4232 */
4233 status = nxge_disable_rxdma_channel(nxgep, channel);
2984/*ARGSUSED*/
2985static nxge_status_t
2986nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
2987{
2988 p_nxge_rx_ring_stats_t rdc_stats;
2989 npi_handle_t handle;
2990 npi_status_t rs;
2991 boolean_t rxchan_fatal = B_FALSE;

--- 1488 unchanged lines hidden (view full) ---

4480
4481 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4482 "==> nxge_rxdma_stop_channel: control done"));
4483
4484 /*
4485 * Make sure channel is disabled.
4486 */
4487 status = nxge_disable_rxdma_channel(nxgep, channel);
4488
4234 if (status != NXGE_OK) {
4235 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4236 " nxge_rxdma_stop_channel: "
4237 " init enable rxdma failed (0x%08x channel %d)",
4238 status, channel));
4239 return (status);
4240 }
4241

--- 550 unchanged lines hidden ---
4489 if (status != NXGE_OK) {
4490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4491 " nxge_rxdma_stop_channel: "
4492 " init enable rxdma failed (0x%08x channel %d)",
4493 status, channel));
4494 return (status);
4495 }
4496

--- 550 unchanged lines hidden ---