/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Joyent, Inc.
 */

#include "bnx.h"
#include "bnx_mm.h"
#include "bnxgld.h"
#include "bnxsnd.h"
#include "bnxtmr.h"
#include "bnxcfg.h"
#include "serdes.h"

#include "shmem.h"

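/*
 * MII registers are 16 bits wide, so halve the byte offset within the
 * register structure to get the register index.
 */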
#define	MII_REG(_type, _field)	(OFFSETOF(_type, _field)/2)

ddi_dma_attr_t bnx_std_dma_attrib = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffff,	/* dma_attr_addr_hi */
	0x0ffffff,		/* dma_attr_count_max */
	BNX_DMA_ALIGNMENT,	/* dma_attr_align */
	0xffffffff,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00ffffff,		/* dma_attr_maxxfer */
	0xffffffff,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};


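/*
 * Identical to bnx_std_dma_attrib except for the page alignment; selected
 * by mm_alloc_phys_mem() when the requested size is a multiple of
 * LM_PAGE_SIZE.
 */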
static ddi_dma_attr_t bnx_page_dma_attrib = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffffffffff,	/* dma_attr_addr_hi */
	0x0ffffff,		/* dma_attr_count_max */
	LM_PAGE_SIZE,		/* dma_attr_align */
	0xffffffff,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00ffffff,		/* dma_attr_maxxfer */
	0xffffffff,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};


/*
 * Name:        mm_wait
 *
 * Input:       ptr to LM's device structure,
 *              delay value in micro-secs
 *
 * Return:      None.
 *
 * Description: This function busy-waits for at least the specified number
 *              of micro-seconds (the implementation pads the delay by a
 *              factor of ten) and returns only after the time has elapsed.
 */
void
mm_wait(lm_device_t *pdev, u32_t delay_us)
{
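	/*
	 * Flush posted writes so that the delay begins only after any
	 * preceding register writes have reached the device.
	 */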
	FLUSHPOSTEDWRITES(pdev);
	drv_usecwait(delay_us * 10);
} /* mm_wait */



/*
 * Name:        mm_read_pci
 *
 * Input:       ptr to LM's device structure,
 *              register offset into config space,
 *              ptr to u32 where the register value is returned
 *
 * Return:      LM_STATUS_SUCCESS, if successful
 *              LM_STATUS_FAILURE, if the BAR register view is not mapped
 *
 * Description: This routine reads the PCI config space for the given device
 *              by calling pci_config_get32().
 */
lm_status_t
mm_read_pci(lm_device_t *pdev, u32_t pci_reg, u32_t *reg_value)
{
	um_device_t *udevp = (um_device_t *)pdev;

	*reg_value = pci_config_get32(udevp->os_param.pci_cfg_handle,
	    (off_t)pci_reg);

	return (LM_STATUS_SUCCESS);
} /* mm_read_pci */



/*
 * Name:        mm_write_pci
 *
 * Input:       ptr to LM's device structure,
 *              register offset into config space,
 *              u32 value to be written to PCI config register
 *
 * Return:      LM_STATUS_SUCCESS, if successful
 *              LM_STATUS_FAILURE, if the BAR register view is not mapped
 *
 * Description: This routine writes to a PCI config register using the DDI
 *              call pci_config_put32().
 */
lm_status_t
mm_write_pci(lm_device_t *pdev, u32_t pci_reg, u32_t reg_value)
{
	um_device_t *udevp = (um_device_t *)pdev;

	pci_config_put32(udevp->os_param.pci_cfg_handle,
	    (off_t)pci_reg, (uint32_t)reg_value);

	return (LM_STATUS_SUCCESS);
} /* mm_write_pci */



/*
 * Name:        mm_map_io_base
 *
 * Input:       ptr to LM's device structure,
 *              physical address of the BAR reg
 *                      (not used in this implementation),
 *              size of the register window
 *
 * Return:      ptr to mapped virtual memory
 *
 * Description: This routine maps the BAR register window and returns the
 *              virtual address in the CPU address space.
 */
void *
mm_map_io_base(lm_device_t *pdev, lm_address_t base_addr, u32_t size)
{
	um_device_t *udevp = (um_device_t *)pdev;

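	/*
	 * The BAR is mapped once during attach; reuse the cached access
	 * handle and virtual address instead of creating a new mapping.
	 */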
	pdev->vars.dmaRegAccHandle = udevp->os_param.reg_acc_handle;

	return ((void *)(udevp->os_param.regs_addr));
} /* mm_map_io_base */



/*
 * Name:        mm_desc_size
 *
 * Input:       ptr to LM's device structure,
 *              descriptor type
 *
 * Return:      size of the descriptor structure
 *
 * Description: This routine currently returns the size of the packet
 *              descriptor as defined by the UM module (lm_pkt_t is embedded
 *              in this struct). This is used by LM's init routines when
 *              allocating memory for the TX/RX descriptor queues.
 */
u32_t
mm_desc_size(lm_device_t *pdev, u32_t desc_type)
{
	u32_t desc_size;

	switch (desc_type) {
	case DESC_TYPE_L2RX_PACKET:
		desc_size = sizeof (um_rxpacket_t);
		break;

	default:
		desc_size = 0;
		break;
	}

	desc_size = ALIGN_VALUE_TO_WORD_BOUNDARY(desc_size);

	return (desc_size);
} /* mm_desc_size */



/*
 * Name:        mm_get_user_config
 *
 * Input:       ptr to LM's device structure
 *
 * Return:      SUCCESS
 *
 * Description: This routine maps user options to the corresponding
 *              parameters in the LM and UM device structures.
 */
lm_status_t
mm_get_user_config(lm_device_t *pdev)
{
	u32_t keep_vlan_tag = 0;
	u32_t offset;
	u32_t val;
	um_device_t *umdevice = (um_device_t *)pdev;

	bnx_cfg_init(umdevice);

	bnx_cfg_map_phy(umdevice);

	/*
	 * If management firmware is running, ensure that we don't keep the
	 * VLAN tag; this is for older firmware.
	 */
	offset = pdev->hw_info.shmem_base;
	offset += OFFSETOF(shmem_region_t,
	    dev_info.port_feature_config.config);
	REG_RD_IND(pdev, offset, &val);

	if (!(val & PORT_FEATURE_MFW_ENABLED))
		keep_vlan_tag = 1;

	/*
	 * Newer versions of the firmware can handle VLAN tags; check
	 * whether this version of the firmware is one of them.
	 */
	offset = pdev->hw_info.shmem_base;
	offset += OFFSETOF(shmem_region_t, drv_fw_cap_mb.fw_cap_mb);
	REG_RD_IND(pdev, offset, &val);

	if ((val & FW_CAP_SIGNATURE) == FW_CAP_SIGNATURE) {
		if ((val & (FW_CAP_MFW_CAN_KEEP_VLAN |
		    FW_CAP_BC_CAN_UPDATE_VLAN)) ==
		    (FW_CAP_MFW_CAN_KEEP_VLAN | FW_CAP_BC_CAN_UPDATE_VLAN)) {
			offset = pdev->hw_info.shmem_base;
			offset += OFFSETOF(shmem_region_t,
			    drv_fw_cap_mb.drv_ack_cap_mb);
			REG_WR_IND(pdev, offset, DRV_ACK_CAP_SIGNATURE |
			    FW_CAP_MFW_CAN_KEEP_VLAN |
			    FW_CAP_BC_CAN_UPDATE_VLAN);

			keep_vlan_tag = 1;
		}
	}

	pdev->params.keep_vlan_tag = keep_vlan_tag;

	return (LM_STATUS_SUCCESS);
} /* mm_get_user_config */



/*
 * Name:        mm_alloc_mem
 *
 * Input:       ptr to LM's device structure,
 *              size of the memory block to be allocated
 *
 * Return:      ptr to newly allocated memory region
 *
 * Description: This routine allocates a memory region and updates the
 *              resource list to reflect the newly allocated memory.
 */
void *
mm_alloc_mem(lm_device_t *pdev, u32_t mem_size, void *resc_list)
{
	void *memptr;
	bnx_memreq_t *memreq;
	um_device_t *umdevice;

	(void) resc_list;

	umdevice = (um_device_t *)pdev;

	if (mem_size == 0) {
		return (NULL);
	}

	if (umdevice->memcnt == BNX_MAX_MEMREQS) {
		cmn_err(CE_WARN, "%s: Lower module memreq overflow.\n",
		    umdevice->dev_name);
		return (NULL);
	}

	memptr = kmem_zalloc(mem_size, KM_NOSLEEP);
	if (memptr == NULL) {
		cmn_err(CE_WARN, "%s: Failed to allocate local memory.\n",
		    umdevice->dev_name);
		return (NULL);
	}

	memreq = &umdevice->memreq[umdevice->memcnt];

	memreq->addr = memptr;
	memreq->size = mem_size;

	umdevice->memcnt++;

	return (memptr);
} /* mm_alloc_mem */



/*
 * Name:        mm_alloc_phys_mem
 *
 * Input:       ptr to LM's device structure,
 *              size of the memory block to be allocated,
 *              pointer to store phys address,
 *              memory type
 *
 * Return:      virtual memory ptr to newly allocated memory region
 *
 * Description: This routine allocates a DMA-able memory region and updates
 *              the resource list to reflect the newly allocated memory.
 *              This function returns the physical address in addition to
 *              the virtual address pointer.
 */
void *
mm_alloc_phys_mem(lm_device_t *pdev, u32_t mem_size, lm_address_t *phys_mem,
    u8_t mem_type, void *resc_list)
{
	int rc;
	caddr_t pbuf;
	um_device_t *udevp;
	size_t real_len;
	unsigned int count;
	ddi_dma_attr_t *dma_attrib;
	ddi_dma_handle_t *dma_handle;
	ddi_acc_handle_t *acc_handle;
	ddi_dma_cookie_t cookie;

	(void) mem_type;
	(void) resc_list;

	udevp = (um_device_t *)pdev;

	if (mem_size == 0) {
		return (NULL);
	}

	if (udevp->os_param.dma_handles_used == BNX_MAX_PHYS_MEMREQS) {
		cmn_err(CE_WARN, "%s: %s: Lower module phys memreq overflow.\n",
		    udevp->dev_name, __func__);
		return (NULL);
	}

	if (!(mem_size & LM_PAGE_MASK)) {
		/* Size is a multiple of the page size. */
		dma_attrib = &bnx_page_dma_attrib;
	} else {
		dma_attrib = &bnx_std_dma_attrib;
	}

	rc = udevp->os_param.dma_handles_used;
	dma_handle = &udevp->os_param.dma_handle[rc];
	acc_handle = &udevp->os_param.dma_acc_handle[rc];

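	/*
	 * The usual three-step DDI DMA allocation: allocate a DMA handle,
	 * allocate memory for it, then bind the handle to obtain the
	 * device-visible address.
	 */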
	rc = ddi_dma_alloc_handle(udevp->os_param.dip, dma_attrib,
	    DDI_DMA_DONTWAIT, (void *)0, dma_handle);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: Failed to alloc phys dma handle.\n",
		    udevp->dev_name, __func__);
		return (NULL);
	}

	rc = ddi_dma_mem_alloc(*dma_handle, (size_t)mem_size +
	    BNX_DMA_ALIGNMENT, &bnxAccessAttribBUF, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, (void *)0, &pbuf, &real_len, acc_handle);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: Failed to alloc phys memory.\n",
		    udevp->dev_name, __func__);
		goto error1;
	}

	rc = ddi_dma_addr_bind_handle(*dma_handle, (struct as *)0, pbuf,
	    real_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT,
	    (void *)0, &cookie, &count);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: Failed to bind DMA address.\n",
		    udevp->dev_name, __func__);
		goto error2;
	}

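	/*
	 * dma_attr_sgllen is 1, so the bind yields a single cookie whose
	 * address is the DMA (device-visible) address of the buffer.
	 */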
	phys_mem->as_u64 = (u64_t)cookie.dmac_laddress;

	/*
	 * Save the virtual memory address so
	 * we can get the dma_handle later.
	 */
	udevp->os_param.dma_virt[udevp->os_param.dma_handles_used] = pbuf;

	udevp->os_param.dma_handles_used++;

	/* Zero the memory... */
	bzero(pbuf, real_len);

	/* ...and make sure the new contents are flushed back to main memory. */
	(void) ddi_dma_sync(*dma_handle, 0, real_len, DDI_DMA_SYNC_FORDEV);

	return (pbuf);

error2:
	ddi_dma_mem_free(acc_handle);

error1:
	ddi_dma_free_handle(dma_handle);

	return (NULL);
} /* mm_alloc_phys_mem */



/*
 * Name:        mm_indicate_tx
 *
 * Input:       ptr to LM's device structure,
 *              TX chain index,
 *              array of pointers to packet descriptors,
 *              number of packet descriptors in array
 *
 * Return:      None
 *
 * Description:
 *              Lower module calls this API function to return transmit packet
 *              buffers to the system, and to allow the driver to reclaim
 *              transmit resources.  This function is only called upon transmit
 *              abort and so is not in the fast path.
 */
void
mm_indicate_tx(lm_device_t *pdev, u32_t chain_idx,
    struct _lm_packet_t *packet_arr[], u32_t num_packets)
{
	um_txpacket_t **pkt_ptr;
	um_txpacket_t *pkt;
	s_list_t comp_list;

	pkt_ptr = (um_txpacket_t **)packet_arr;

	s_list_init(&comp_list, NULL, NULL, 0);

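	/*
	 * Chain the aborted packets onto a local completion list so they
	 * can all be reclaimed with a single call below.
	 */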
	while (num_packets) {
		pkt = *pkt_ptr;

		s_list_push_tail(&comp_list, &(pkt->lm_pkt.link));

		pkt_ptr++;
		num_packets--;
	}

	bnx_xmit_ring_reclaim((um_device_t *)pdev, chain_idx, &comp_list);
} /* mm_indicate_tx */



/*
 * Description: Logs the current link state (media type, speed, duplex, and
 *              flow control settings) on the console.
 *
 * Return:      None
 */
static void
bnx_display_link_msg(um_device_t *const umdevice)
{
	char *media;
	char linkstr[128];

	if (umdevice->dev_var.isfiber) {
		media = "Fiber";
	} else {
		media = "Copper";
	}

	if (umdevice->nddcfg.link_speed != 0) {
		(void) strlcpy(linkstr, "up (", sizeof (linkstr));

		switch (umdevice->nddcfg.link_speed) {
		case 2500:
			(void) strlcat(linkstr, "2500Mbps, ", sizeof (linkstr));
			break;
		case 1000:
			(void) strlcat(linkstr, "1000Mbps, ", sizeof (linkstr));
			break;
		case 100:
			(void) strlcat(linkstr, "100Mbps, ", sizeof (linkstr));
			break;
		case 10:
			(void) strlcat(linkstr, "10Mbps, ", sizeof (linkstr));
			break;
		default:
			(void) strlcat(linkstr, "0Mbps, ", sizeof (linkstr));
		}

		if (umdevice->nddcfg.link_duplex) {
			(void) strlcat(linkstr, "Full Duplex",
			    sizeof (linkstr));
		} else {
			(void) strlcat(linkstr, "Half Duplex",
			    sizeof (linkstr));
		}

		if (umdevice->nddcfg.link_tx_pause ||
		    umdevice->nddcfg.link_rx_pause) {
			(void) strlcat(linkstr, ", ", sizeof (linkstr));
			if (umdevice->nddcfg.link_tx_pause) {
				(void) strlcat(linkstr, "Tx", sizeof (linkstr));
				if (umdevice->nddcfg.link_rx_pause) {
					(void) strlcat(linkstr, " & Rx",
					    sizeof (linkstr));
				}
			} else {
				(void) strlcat(linkstr, "Rx", sizeof (linkstr));
			}
			(void) strlcat(linkstr, " Flow Control ON",
			    sizeof (linkstr));
		}
		(void) strlcat(linkstr, ")", sizeof (linkstr));
	} else {
		(void) snprintf(linkstr, sizeof (linkstr), "down");
	}

	cmn_err(CE_NOTE, "!%s: %s link is %s", umdevice->dev_name, media,
	    linkstr);
} /* bnx_display_link_msg */



/*
 * Name:        bnx_update_lp_cap
 *
 * Input:       ptr to device structure
 *
 * Return:      None
 *
 * Description: This function updates the link partner's advertised
 *              capabilities.
 */
static void
bnx_update_lp_cap(um_device_t *const umdevice)
{
	u32_t		miireg;
	lm_status_t	lmstatus;
	lm_device_t	*lmdevice;

	lmdevice = &(umdevice->lm_dev);

	if (umdevice->dev_var.isfiber) {
		lmstatus = lm_mread(lmdevice, lmdevice->params.phy_addr,
		    MII_REG(serdes_reg_t, mii_aneg_nxt_pg_rcv1), &miireg);
		if (lmstatus == LM_STATUS_SUCCESS) {
			if (miireg & MII_ANEG_NXT_PG_RCV1_2G5) {
				umdevice->remote.param_2500fdx = B_TRUE;
			}
		}

		lmstatus = lm_mread(lmdevice, lmdevice->params.phy_addr,
		    PHY_LINK_PARTNER_ABILITY_REG, &miireg);
		if (lmstatus == LM_STATUS_SUCCESS) {
			miireg &= MII_ABILITY_PAUSE;
			if (miireg == MII_ADVERT_SYM_PAUSE) {
				umdevice->remote.param_tx_pause = B_TRUE;
				umdevice->remote.param_rx_pause = B_TRUE;
			} else if (miireg == MII_ADVERT_ASYM_PAUSE) {
				umdevice->remote.param_tx_pause = B_TRUE;
			}

			if (miireg & MII_ABILITY_FULL) {
				umdevice->remote.param_1000fdx = B_TRUE;
			}

			if (miireg & MII_ABILITY_HALF) {
				umdevice->remote.param_1000hdx = B_TRUE;
			}
		}
	} else {
		/* Copper */
		lmstatus = lm_mread(lmdevice, lmdevice->params.phy_addr,
		    PHY_1000BASET_STATUS_REG, &miireg);
		if (lmstatus == LM_STATUS_SUCCESS) {
			if (miireg & PHY_LINK_PARTNER_1000BASET_FULL) {
				umdevice->remote.param_1000fdx = B_TRUE;
			}

			if (miireg & PHY_LINK_PARTNER_1000BASET_HALF) {
				umdevice->remote.param_1000hdx = B_TRUE;
			}
		}

		lmstatus = lm_mread(lmdevice, lmdevice->params.phy_addr,
		    PHY_LINK_PARTNER_ABILITY_REG, &miireg);
		if (lmstatus == LM_STATUS_SUCCESS) {
			if (miireg & PHY_LINK_PARTNER_PAUSE_CAPABLE) {
				umdevice->remote.param_tx_pause = B_TRUE;
				umdevice->remote.param_rx_pause = B_TRUE;
			} else if (miireg & PHY_LINK_PARTNER_ASYM_PAUSE) {
				umdevice->remote.param_tx_pause = B_TRUE;
			}

			if (miireg & PHY_LINK_PARTNER_100BASETX_FULL) {
				umdevice->remote.param_100fdx = B_TRUE;
			}

			if (miireg & PHY_LINK_PARTNER_100BASETX_HALF) {
				umdevice->remote.param_100hdx = B_TRUE;
			}

			if (miireg & PHY_LINK_PARTNER_10BASET_FULL) {
				umdevice->remote.param_10fdx = B_TRUE;
			}

			if (miireg & PHY_LINK_PARTNER_10BASET_HALF) {
				umdevice->remote.param_10hdx = B_TRUE;
			}
		}
	}

#if 0
	/*
	 * If we can gather _any_ information about our link partner, then
	 * because this information is exchanged through autonegotiation, we
	 * know that our link partner supports autonegotiation.
	 *
	 * FIXME -- Find a more authoritative way to update link_autoneg.  I'm
	 * not sure it is legal, but it sounds possible to have autonegotiation
	 * enabled on the remote end with no capabilities advertised.
	 */
	if (umdevice->remote.param_2500fdx ||
	    umdevice->remote.param_1000fdx ||
	    umdevice->remote.param_1000hdx ||
	    umdevice->remote.param_100fdx ||
	    umdevice->remote.param_100hdx ||
	    umdevice->remote.param_10fdx ||
	    umdevice->remote.param_10hdx ||
	    umdevice->remote.param_tx_pause ||
	    umdevice->remote.param_rx_pause) {
		umdevice->remote.param_autoneg = B_TRUE;
	}
#else
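	/*
	 * Instead, consult the PHY's auxiliary status register, which
	 * indicates whether the link partner is autonegotiation-capable.
	 */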
	lmstatus = lm_mread(lmdevice, lmdevice->params.phy_addr,
	    BCM540X_AUX_STATUS_REG, &miireg);
	if (lmstatus == LM_STATUS_SUCCESS) {
		if (miireg & BIT_12) {
			umdevice->remote.link_autoneg = B_TRUE;
		}
	}
#endif
} /* bnx_update_lp_cap */



/*
 * Name:        mm_indicate_link
 *
 * Input:       ptr to LM's device structure,
 *              link status,
 *              lm_medium_t struct
 *
 * Return:      None
 *
 * Description: The lower module calls this function whenever the network
 *              link status changes. This routine updates the driver data
 *              structure as well as calls gld_linkstate() to notify GLD
 *              of the event.
 */
void
mm_indicate_link(lm_device_t *lmdevice, lm_status_t link, lm_medium_t medium)
{
	int link_speed;
	um_device_t *umdevice;

	umdevice = (um_device_t *)lmdevice;

	if (umdevice->link_updates_ok == B_FALSE) {
		return;
	}

	/* Ignore the link status if it has not changed since the last indication. */
	if ((umdevice->dev_var.indLink == link) &&
	    (umdevice->dev_var.indMedium == medium)) {
		return;
	}

	umdevice->dev_var.indLink = link;
	umdevice->dev_var.indMedium = medium;

	switch (GET_MEDIUM_SPEED(medium)) {
	case LM_MEDIUM_SPEED_10MBPS:
		link_speed = 10;
		break;

	case LM_MEDIUM_SPEED_100MBPS:
		link_speed = 100;
		break;

	case LM_MEDIUM_SPEED_1000MBPS:
		link_speed = 1000;
		break;

	case LM_MEDIUM_SPEED_2500MBPS:
		link_speed = 2500;
		break;

	default:
		link_speed = 0;
		break;
	}


	/*
	 * Validate the line speed against known hardware capabilities.
	 * This is a common occurrence.
	 */
	if (umdevice->dev_var.isfiber) {
		if (link_speed != 2500 && link_speed != 1000) {
			link_speed = 0;
		}
	}

	if (link_speed == 0) {
		link = LM_STATUS_LINK_DOWN;
	}

	/*
	 * If neither link-up nor link-down flag is present, then there must
	 * have been multiple link events.  Do the right thing.
	 */
	if (link != LM_STATUS_LINK_ACTIVE && link != LM_STATUS_LINK_DOWN) {
		/* Fill in the missing information. */
		if (link_speed != 0) {
			link = LM_STATUS_LINK_ACTIVE;
		} else {
			link = LM_STATUS_LINK_DOWN;
		}
	}

#if 0
	if (((umdevice->nddcfg.link_speed == 0) &&
	    (link != LM_STATUS_LINK_ACTIVE)) ||
	    ((umdevice->nddcfg.link_speed != 0) &&
	    (link != LM_STATUS_LINK_DOWN))) {
		/* This is a false notification. */
		return;
	}
#endif

	if (umdevice->timer_link_check_interval) {
		if (link == LM_STATUS_LINK_ACTIVE) {
			if (lmdevice->vars.serdes_fallback_status) {
				/*
				 * Start the timer to poll the serdes for
				 * reception of configs from the link partner.
				 * When this happens the remote has autoneg
				 * enabled and we'll restart our autoneg.
				 */
				bnx_link_timer_restart(umdevice);
			}
		} else {
			if (umdevice->timer_link_check_counter) {
				bnx_link_timer_restart(umdevice);
			}
		}
	}

	if (link == LM_STATUS_LINK_DOWN) {
		umdevice->nddcfg.link_speed = 0;
		umdevice->nddcfg.link_duplex = B_FALSE;
		umdevice->nddcfg.link_tx_pause = B_FALSE;
		umdevice->nddcfg.link_rx_pause = B_FALSE;

		umdevice->remote.link_autoneg = B_FALSE;
		umdevice->remote.param_2500fdx = B_FALSE;
		umdevice->remote.param_1000fdx = B_FALSE;
		umdevice->remote.param_1000hdx = B_FALSE;
		umdevice->remote.param_100fdx = B_FALSE;
		umdevice->remote.param_100hdx = B_FALSE;
		umdevice->remote.param_10fdx = B_FALSE;
		umdevice->remote.param_10hdx = B_FALSE;
		umdevice->remote.param_tx_pause = B_FALSE;
		umdevice->remote.param_rx_pause = B_FALSE;

		bnx_display_link_msg(umdevice);

		bnx_gld_link(umdevice, LINK_STATE_DOWN);
	} else if (link == LM_STATUS_LINK_ACTIVE) {
		umdevice->nddcfg.link_speed = link_speed;

		if (GET_MEDIUM_DUPLEX(medium)) {
			/* half duplex */
			umdevice->nddcfg.link_duplex = B_FALSE;
		} else {
			/* full duplex */
			umdevice->nddcfg.link_duplex = B_TRUE;
		}

		if (lmdevice->vars.flow_control &
		    LM_FLOW_CONTROL_TRANSMIT_PAUSE) {
			umdevice->nddcfg.link_tx_pause = B_TRUE;
		} else {
			umdevice->nddcfg.link_tx_pause = B_FALSE;
		}

		if (lmdevice->vars.flow_control &
		    LM_FLOW_CONTROL_RECEIVE_PAUSE) {
			umdevice->nddcfg.link_rx_pause = B_TRUE;
		} else {
			umdevice->nddcfg.link_rx_pause = B_FALSE;
		}

		if (umdevice->curcfg.lnkcfg.link_autoneg == B_TRUE) {
			bnx_update_lp_cap(umdevice);
		}

		bnx_display_link_msg(umdevice);

		bnx_gld_link(umdevice, LINK_STATE_UP);
	}
} /* mm_indicate_link */



/*
 * Description: Acquires the mutex that serializes indirect register
 *              accesses to the device.
 *
 * Return:      None
 */
void
mm_acquire_ind_reg_lock(struct _lm_device_t *pdev)
{
	um_device_t *umdevice;

	umdevice = (um_device_t *)pdev;

	mutex_enter(&umdevice->os_param.ind_mutex);
} /* mm_acquire_ind_reg_lock */



/*
 * Description: Releases the mutex that serializes indirect register
 *              accesses to the device.
 *
 * Return:      None
 */
void
mm_release_ind_reg_lock(struct _lm_device_t *pdev)
{
	um_device_t *umdevice;

	umdevice = (um_device_t *)pdev;

	mutex_exit(&umdevice->os_param.ind_mutex);
} /* mm_release_ind_reg_lock */
857