/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1 (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License available at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Joyent, Inc.
 */

#include "bnx.h"
#include "bnx_mm.h"
#include "bnxsnd.h"
#include "bnxrcv.h"
#include "bnxint.h"
#include "bnxtmr.h"
#include "bnxcfg.h"

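/*
 * Name:        bnx_update_phy
 *
 * Input:       ptr to driver structure
 *
 * Return:      none
 *
 * Description: Remaps the user 'ndd' PHY settings onto the LM structure,
 *              then resets and reprograms the PHY under the PHY mutex and
 *              services any resulting PHY interrupt state.
 */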
void
bnx_update_phy(um_device_t *const umdevice)
{
	lm_status_t lmstatus;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	/* Map 'ndd' parameters to LM struct. */
	bnx_cfg_map_phy(umdevice);

	mutex_enter(&umdevice->os_param.phy_mutex);

	/* Reset, re-program and bring-up phy. */
	lmstatus = lm_init_phy(lmdevice, lmdevice->params.req_medium,
	    lmdevice->params.flow_ctrl_cap, lmdevice->params.selective_autoneg,
	    lmdevice->params.wire_speed, 0);
	if (lmstatus != LM_STATUS_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to configure the PHY.",
		    umdevice->dev_name);
	}

	lm_service_phy_int(lmdevice, TRUE);

	mutex_exit(&umdevice->os_param.phy_mutex);
}

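/*
 * Name:        bnx_find_dma_hdl
 *
 * Input:       ptr to driver structure, virtual address of a DMA area
 *
 * Return:      ptr to the matching DMA handle, or NULL if none is found
 *
 * Description: Walks the table of DMA allocations and returns the handle
 *              that was used to map the given virtual address.
 */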
ddi_dma_handle_t *
bnx_find_dma_hdl(um_device_t *const umdevice, const void *const virtaddr)
{
	int i;
	ddi_dma_handle_t *dmahdl;

	dmahdl = NULL;
	for (i = 0; i < umdevice->os_param.dma_handles_used; i++) {
		if (umdevice->os_param.dma_virt[i] == virtaddr) {
			dmahdl = &(umdevice->os_param.dma_handle[i]);
		}
	}

	return (dmahdl);
}

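/*
 * Name:        bnx_free_lmmem
 *
 * Input:       ptr to driver structure
 *
 * Return:      none
 *
 * Description: Releases, in reverse order of allocation, all DMA-able
 *              (shared) memory and all locally allocated memory that was
 *              handed to the LM layer.
 */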
static void
bnx_free_lmmem(um_device_t *const umdevice)
{
	int i;
	bnx_memreq_t *memreq;
	ddi_dma_handle_t *dma_handle;
	ddi_acc_handle_t *acc_handle;

	if (umdevice->os_param.dma_handles_used != 0) {
		i = umdevice->os_param.dma_handles_used - 1;

		dma_handle = &(umdevice->os_param.dma_handle[i]);
		acc_handle = &(umdevice->os_param.dma_acc_handle[i]);

		/* Free all shared memory. */
		for (; i >= 0; i--) {
			(void) ddi_dma_unbind_handle(*dma_handle);

			ddi_dma_mem_free(acc_handle);

			ddi_dma_free_handle(dma_handle);

			dma_handle--;
			acc_handle--;
		}

		umdevice->os_param.dma_handles_used = 0;
	}

	if (umdevice->memcnt != 0) {
		/* Free all local memory. */
		for (i = umdevice->memcnt - 1; i >= 0; i--) {
			memreq = &umdevice->memreq[i];

			kmem_free(memreq->addr, memreq->size);

			memreq->addr = NULL;
			memreq->size = 0;
		}

		umdevice->memcnt = 0;
	}
}

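/*
 * Name:        bnx_hdwr_init
 *
 * Input:       ptr to driver structure
 *
 * Return:      0 on success, -1 on failure
 *
 * Description: Queries device information from the LM layer, allocates
 *              driver resources (packet descriptors, shared memory, etc.),
 *              caches the DMA handle of the status block and initializes
 *              the default receive mask.
 */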
int
bnx_hdwr_init(um_device_t *const umdevice)
{
	lm_status_t lmstatus;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	lmstatus = lm_get_dev_info(lmdevice);
	if (lmstatus != LM_STATUS_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to get device information.",
		    umdevice->dev_name);
		return (-1);
	}

	/*
	 * Initialize the adapter resources, mainly the memory needed by the
	 * driver, such as packet descriptors, shared memory, etc.
	 */
	lmstatus = lm_init_resc(lmdevice);
	if (lmstatus != LM_STATUS_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to allocate device resources.",
		    umdevice->dev_name);
		goto error1;
	}

	if (bnx_txpkts_init(umdevice)) {
		goto error1;
	}

	if (bnx_rxpkts_init(umdevice)) {
		goto error2;
	}

	/* Find the DMA handle associated with the status block memory. */
	umdevice->os_param.status_block_dma_hdl = bnx_find_dma_hdl(umdevice,
	    (void *)(umdevice->lm_dev.vars.status_virt));

	/* Reset the local interrupt event index. */
	umdevice->dev_var.processed_status_idx = 0;

	/* Initialize the receive mask to a sane default. */
	umdevice->dev_var.rx_filter_mask = LM_RX_MASK_ACCEPT_UNICAST |
	    LM_RX_MASK_ACCEPT_BROADCAST;

	return (0);

error2:
	bnx_txpkts_fini(umdevice);

error1:
	bnx_free_lmmem(umdevice);

	return (-1);
}

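/*
 * Name:        bnx_hdwr_acquire
 *
 * Input:       ptr to driver structure
 *
 * Return:      0 on success, -1 on failure
 *
 * Description: Takes the chip out of firmware control, resets it, programs
 *              the PHY, posts receive buffers, installs the receive mask
 *              and finally enables interrupts and the periodic timer.
 */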
int
bnx_hdwr_acquire(um_device_t *const umdevice)
{
	lm_status_t lmstatus;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	/* Reset the configuration to the hardware default. */
	bnx_cfg_reset(umdevice);

	/*
	 * A call to lm_reset() implicitly means we are relieving the firmware
	 * of its responsibility to maintain the device.  The driver assumes
	 * control.  The LM vars.medium field normally gets set with a call to
	 * lm_init_phy(), but that function cannot be called before we assume
	 * control of the device.  If we did, we would run the risk of
	 * contending with the firmware for PHY accesses.  Do the next best
	 * thing.
	 */
	lmdevice->vars.medium = lm_get_medium(lmdevice);

	/* Map 'ndd' parameters to LM struct. */
	bnx_cfg_map_phy(umdevice);

	/* Bring the chip under driver control. */
	lmstatus = lm_reset(lmdevice, LM_REASON_DRIVER_RESET);
	if (lmstatus != LM_STATUS_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to reset chip.",
		    umdevice->dev_name);
		return (-1);
	}

	/* Configure the PHY to the requested settings. */
	lmstatus = lm_init_phy(lmdevice, lmdevice->params.req_medium,
	    lmdevice->params.flow_ctrl_cap, lmdevice->params.selective_autoneg,
	    lmdevice->params.wire_speed, 0);
	if (lmstatus != LM_STATUS_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to initialize the PHY.",
		    umdevice->dev_name);
	}

	lm_service_phy_int(lmdevice, FALSE); /* force a phy status update */

	umdevice->dev_var.indLink = lmdevice->vars.link_status;
	umdevice->dev_var.indMedium = lmdevice->vars.medium;

	/*
	 * Need to clear the TX PATCH scratch register at offset 0x420
	 * to instruct the chip to do full TCP checksum calculations.
	 */
	REG_WR_IND(lmdevice, (OFFSETOF(reg_space_t, tpat.tpat_scratch[0]) +
	    0x420), 0);

	FLUSHPOSTEDWRITES(lmdevice);

	umdevice->recv_discards = 0;

	/* Make sure the rx statistics counters are reset. */
	bzero(&(lmdevice->rx_info.stats), sizeof (lm_rx_stats_t));

	/* Post rx buffers to the chip. */
	(void) lm_post_buffers(lmdevice, 0, NULL);

	/* Allow the hardware to accept rx traffic. */
	(void) lm_set_rx_mask(lmdevice, RX_FILTER_USER_IDX0,
	    umdevice->dev_var.rx_filter_mask);

	FLUSHPOSTEDWRITES(lmdevice);

	/* Enable interrupts. */
	bnx_intr_enable(umdevice);

	/* Start the periodic timer. */
	bnx_timer_start(umdevice);

	return (0);
}

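/*
 * Name:        bnx_hdwr_release
 *
 * Input:       ptr to driver structure
 *
 * Return:      none
 *
 * Description: Quiesces the device: stops the timer and interrupts, blocks
 *              further receives, resets the chip and reclaims all transmit
 *              and receive buffers that were posted to the hardware.
 */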
void
bnx_hdwr_release(um_device_t *const umdevice)
{
	int reason;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	/* Stop the periodic timer. */
	bnx_timer_stop(umdevice);

	/* Disable interrupts. */
	bnx_intr_disable(umdevice);

	/*
	 * On Solaris, when RX traffic is accepted, the system might generate
	 * and attempt to send some TX packets (from within gld_recv()!).
	 * Claiming any TX locks before this point would create a deadlock.
	 * The ISR would be waiting for a lock acquired here that would never
	 * be freed, since we, in turn, would be waiting for the ISR to finish
	 * here.  Consequently, we acquire the TX lock as soon as we know that
	 * no TX traffic is a result of RX traffic.
	 */
	rw_enter(&umdevice->os_param.gld_snd_mutex, RW_WRITER);

	/* Set the RX mask to stop receiving any further packets. */
	(void) lm_set_rx_mask(lmdevice, RX_FILTER_USER_IDX0,
	    LM_RX_MASK_ACCEPT_NONE);

	FLUSHPOSTEDWRITES(lmdevice);

	if (umdevice->dev_var.fw_ver < FW_VER_WITH_UNLOAD_POWER_DOWN) {
		reason = LM_REASON_DRIVER_SHUTDOWN;
	} else {
		reason = LM_REASON_DRIVER_UNLOAD_POWER_DOWN;
	}

	lm_chip_reset(lmdevice, reason);

	FLUSHPOSTEDWRITES(lmdevice);

	/* Reclaim all tx buffers submitted to the hardware. */
	bnx_txpkts_flush(umdevice);

	/* Reclaim all rx buffers submitted to the hardware. */
	bnx_rxpkts_recycle(umdevice);

	rw_exit(&umdevice->os_param.gld_snd_mutex);
}

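/*
 * Name:        bnx_hdwr_fini
 *
 * Input:       ptr to driver structure
 *
 * Return:      none
 *
 * Description: Tears down the packet descriptor rings and frees all memory
 *              allocated on behalf of the LM layer.
 */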
void
bnx_hdwr_fini(um_device_t *const umdevice)
{
	bnx_rxpkts_fini(umdevice);

	bnx_txpkts_fini(umdevice);

	bnx_free_lmmem(umdevice);
}

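/*
 * Name:        compute_crc32
 *
 * Input:       ptr to a byte buffer and its length
 *
 * Return:      CRC-32 of the buffer
 *
 * Description: Bit-wise, reflected CRC-32 (polynomial 0xedb88320, initial
 *              value 0xffffffff, final inversion), used below to derive the
 *              multicast hash.  For reference, this algorithm should yield
 *              the well-known check value 0xcbf43926 for the ASCII string
 *              "123456789".
 */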
static u32_t
compute_crc32(const u8_t *const buf, u32_t buf_size)
{
	u32_t reg;
	u32_t tmp;
	u32_t j;
	u32_t k;

	reg = 0xffffffff;

	for (j = 0; j < buf_size; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp) {
				reg ^= 0xedb88320;
			}
		}
	}

	return (~reg);
}

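/*
 * Name:        bnx_find_mchash_collision
 *
 * Input:       ptr to the LM multicast table, ptr to a multicast address
 *
 * Return:      index of the first table entry that hashes to the same
 *              multicast filter bit position, or -1 if there is no collision
 *
 * Description: Computes the multicast hash bit position for the given
 *              address and compares it against the hash bit position of
 *              every address already in the table.
 */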
int
bnx_find_mchash_collision(lm_mc_table_t *mc_table, const uint8_t *const mc_addr)
{
	u32_t cur_bit_pos;
	u32_t tgt_bit_pos;
	u32_t idx;
	u32_t crc32;

	crc32 = compute_crc32(mc_addr, ETHERNET_ADDRESS_SIZE);

	tgt_bit_pos = ~crc32 & 0xff;

	for (idx = 0; idx < mc_table->entry_cnt; idx++) {
		crc32 = compute_crc32(mc_table->addr_arr[idx].mc_addr,
		    ETHERNET_ADDRESS_SIZE);

		/*
		 * The lower 8 bits of the CRC32 (with the final inversion
		 * undone) select one of 256 possible hash bit positions.
		 */
		cur_bit_pos = ~crc32 & 0xff;

		if (tgt_bit_pos == cur_bit_pos) {
			return (idx);
		}
	}

	return (-1);
}

/*
 * Name:        um_send_driver_pulse
 *
 * Input:       ptr to driver structure
 *
 * Return:      none
 *
 * Description: um_send_driver_pulse routine sends heartbeat pulse to firmware.
 */
void
um_send_driver_pulse(um_device_t *const umdevice)
{
	u32_t msg_code;
	u32_t offset;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	offset = lmdevice->hw_info.shmem_base;
	offset += OFFSETOF(shmem_region_t, drv_fw_mb.drv_pulse_mb);

	mutex_enter(&umdevice->os_param.ind_mutex);

	lmdevice->vars.drv_pulse_wr_seq++;

	msg_code = lmdevice->vars.drv_pulse_wr_seq & DRV_PULSE_SEQ_MASK;

	mutex_exit(&umdevice->os_param.ind_mutex);

	if (lmdevice->params.test_mode & TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE) {
		msg_code |= DRV_PULSE_ALWAYS_ALIVE;
	}

	REG_WR_IND(lmdevice, offset, msg_code);
}