/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved.  */

/*
 * Source file containing the implementation of the MailBox queue handling
 * and related helper functions
 */

#include <oce_impl.h>

/*
 * function to drain the MQ completion queue and process its CQEs
 *
 * arg - pointer to the MQ whose completion queue is to be drained
 *
 * return the number of CQEs processed
 */
uint16_t
oce_drain_mq_cq(void *arg)
{
	struct oce_mq_cqe *cqe = NULL;
	uint16_t num_cqe = 0;
	link_state_t link_status;
	struct oce_async_cqe_link_state *acqe;
	struct oce_mq *mq;
	struct oce_cq  *cq;
	struct oce_dev *dev;

	/* process CQEs until an entry that is not yet valid is reached */
	mq = (struct oce_mq *)arg;
	cq = mq->cq;
	dev = mq->parent;
	mutex_enter(&mq->lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
	while (cqe->u0.dw[3]) {
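		/* byte-swap the CQE dwords into host order before decoding */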
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			acqe = (struct oce_async_cqe_link_state *)cqe;
			if (acqe->u0.s.event_code ==
			    ASYNC_EVENT_CODE_LINK_STATE) {
				/*
				 * ignore the logical-link qualifier,
				 * just check whether the link is up or down
				 */
				link_status = ((acqe->u0.s.link_status &
				    ~ASYNC_EVENT_LOGICAL) ==
				    ASYNC_EVENT_LINK_UP) ?
				    LINK_STATE_UP : LINK_STATE_DOWN;
				mac_link_update(dev->mac_handle, link_status);
				dev->link_status = link_status;
				dev->link_speed = -1;
			}
		}
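		/* invalidate the CQE and advance to the next ring entry */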
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqe++;
	} /* while valid CQEs */
	mutex_exit(&mq->lock);
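	/* re-arm the CQ and acknowledge the entries just consumed */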
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_mq_cq */

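/*
 * function to start the MQ by arming its completion queue
 *
 * mq - pointer to the MQ to start
 *
 * return 0 on success
 */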
int
oce_start_mq(struct oce_mq *mq)
{
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);
	return (0);
}


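/*
 * function to clean up the MQ by discarding pending CQEs and draining
 * the associated event queue
 *
 * mq - pointer to the MQ to clean up
 *
 * return none
 */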
void
oce_clean_mq(struct oce_mq *mq)
{
	struct oce_cq  *cq;
	struct oce_dev *dev;
	uint16_t num_cqe = 0;
	struct oce_mq_cqe *cqe = NULL;

	cq = mq->cq;
	dev = mq->parent;
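	/* walk the CQ and discard any outstanding CQEs without processing them */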
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
	while (cqe->u0.dw[3]) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqe++;
	} /* while valid CQEs */
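	/* acknowledge the discarded CQEs without re-arming the CQ */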
	if (num_cqe)
		oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
	/* Drain the Event queue now */
	oce_drain_eq(mq->cq->eq);
}