/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "dapl.h"
#include "dapl_tavor_hw.h"
#include "dapl_tavor_wr.h"
#include "dapl_tavor_ibtf_impl.h"

#define	bt_debug	0

enum arbel_db_type_e {
	ARBEL_DBR_CQ_SET_CI	= 0x1 << 5,
	ARBEL_DBR_CQ_ARM	= 0x2 << 5,
	ARBEL_DBR_SQ		= 0x3 << 5,
	ARBEL_DBR_RQ		= 0x4 << 5,
	ARBEL_DBR_SRQ		= 0x5 << 5
};

#define	ARBEL_WQE_SGL_INVALID_LKEY	0x00000100
#define	ARBEL_WQE_SEND_SIGNALED_MASK	0x0000000800000000ull
#define	ARBEL_WQE_SEND_SOLICIT_MASK	0x0000000200000000ull
#define	ARBEL_WQE_CTRL_REQBIT_MASK	0x0000000100000000ull
#define	ARBEL_WQE_NEXT_REQBIT_MASK	0x80
#define	ARBEL_WQE_SETCTRL(qp, desc, ctrl) \
	((uint64_t *)(desc))[1] = HTOBE_64(ctrl)
#define	ARBEL_WQE_SETNEXT(qp, desc, nda_op, ee_nds) \
	{ \
		((uint32_t *)(desc))[0] = HTOBE_32(nda_op); \
		((uint32_t *)(desc))[1] = HTOBE_32(ee_nds); \
	}
#define	ARBEL_WQE_SEND_FENCE_MASK	0x40
#define	ARBEL_WQE_SEND_NOPCODE_RDMAW	0x8
#define	ARBEL_WQE_SEND_NOPCODE_SEND	0xA
#define	ARBEL_WQE_SEND_NOPCODE_RDMAR	0x10
#define	ARBEL_WQE_SEND_NOPCODE_BIND	0x18
#define	ARBEL_WQE_NDA_MASK		0x00000000FFFFFFC0ull
#define	ARBEL_WQE_NDS_MASK		0x3F
#define	ARBEL_QPSNDDB_WQE_CNT_SHIFT	0x38
#define	ARBEL_QPSNDDB_WQE_COUNTER_SHIFT	0x28
#define	ARBEL_QPSNDDB_F_SHIFT		0x25
#define	ARBEL_QPSNDDB_NOPCODE_SHIFT	0x20
#define	ARBEL_QPSNDDB_QPN_SHIFT		0x8
#define	ARBEL_DBR_QP_WQE_COUNTER_SHIFT	0x20
#define	ARBEL_DBR_QN_SHIFT		0x8

#define	ARBEL_CQDB_NOTIFY_CQ_SOLICIT	0x1
#define	ARBEL_CQDB_NOTIFY_CQ		0x2

/*
 * Function signatures
 */
extern uint64_t dapls_tavor_wrid_get_entry(ib_cq_handle_t, tavor_hw_cqe_t *,
    uint_t, uint_t, dapls_tavor_wrid_entry_t *);
extern void dapls_tavor_wrid_cq_reap(ib_cq_handle_t);
extern DAPL_OS_LOCK g_tavor_uar_lock;

#ifndef	_LP64
extern void dapls_atomic_assign_64(uint64_t, uint64_t *);
#endif

static int dapli_arbel_wqe_send_build(ib_qp_handle_t, ibt_send_wr_t *,
    uint64_t *, uint_t *);
static DAT_RETURN dapli_arbel_wqe_recv_build(ib_qp_handle_t, ibt_recv_wr_t *,
    uint64_t *, uint_t *);
static int dapli_arbel_cq_cqe_consume(ib_cq_handle_t, tavor_hw_cqe_t *,
    ibt_wc_t *);
static int dapli_arbel_cq_errcqe_consume(ib_cq_handle_t, tavor_hw_cqe_t *,
    ibt_wc_t *);
extern void dapli_tavor_wrid_add_entry(dapls_tavor_workq_hdr_t *, uint64_t,
    uint32_t, uint_t);
extern void dapli_tavor_wrid_add_entry_srq(ib_srq_handle_t, uint64_t, uint32_t);

/*
 * Note: The 64-bit doorbells need to be written atomically.
 * In 32-bit libraries we must use the special assembly routine
 * because compiler-generated code splits the write into two
 * 32-bit word writes.
 */

/*
 * dapli_arbel_cq_doorbell()
 * Takes the specified cq cmd and cq number and rings the cq doorbell
 */
static void
dapli_arbel_cq_doorbell(dapls_hw_uar_t ia_uar, uint32_t cq_cmd, uint32_t cqn,
    uint32_t cmd_sn, uint32_t cq_param)
{
	uint64_t doorbell;

	/* Build the doorbell from the parameters */
	doorbell = (cmd_sn << 4) | cq_cmd;
	doorbell = (doorbell << 24) | cqn;
	doorbell = (doorbell << 32) | cq_param;
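	/*
	 * Resulting 64-bit doorbell layout (derived from the shifts above,
	 * shown for illustration; cmd_sn and cq_cmd together fit in the
	 * top byte for the values used here):
	 *   bits [63:56]  (cmd_sn << 4) | cq_cmd
	 *   bits [55:32]  cqn (CQ number)
	 *   bits [31:0]   cq_param (e.g. the CQ consumer index)
	 */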

	/* Write the doorbell to UAR */
#ifdef _LP64
	((tavor_hw_uar_t *)ia_uar)->cq = HTOBE_64(doorbell);
	/* 32 bit version */
#elif defined(i386)
	dapl_os_lock(&g_tavor_uar_lock);
	/*
	 * For 32 bit intel we assign the doorbell in the order
	 * prescribed by the Tavor PRM, lower to upper addresses
	 */
	((tavor_hw_uar32_t *)ia_uar)->cq[0] =
	    (uint32_t)HTOBE_32(doorbell >> 32);
	((tavor_hw_uar32_t *)ia_uar)->cq[1] =
	    (uint32_t)HTOBE_32(doorbell & 0x00000000ffffffff);
	dapl_os_unlock(&g_tavor_uar_lock);
#else
	dapls_atomic_assign_64(HTOBE_64(doorbell),
	    &((tavor_hw_uar_t *)ia_uar)->cq);
#endif
}

/*
 * dapli_arbel_sq_dbrec()
 * Updates the send queue's software doorbell record with the counter
 * of the next WQE to be posted
 */
static void
dapli_arbel_sq_dbrec(ib_qp_handle_t qp, uint16_t wqe_counter)
{
	qp->qp_sq_dbp[0] = HTOBE_32((wqe_counter + 1) & 0xffff);
}

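/*
 * dapli_arbel_sq_dbreg()
 * Builds the send doorbell value from the WQE counter, fence flag,
 * opcode, QP number and descriptor size, and writes it to the UAR
 * doorbell register
 */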
static void
dapli_arbel_sq_dbreg(dapls_hw_uar_t ia_uar, uint32_t qpn, uint32_t fence,
    uint32_t nopcode, uint16_t wqe_counter, uint32_t nds)
{
	uint64_t doorbell;

	doorbell = ((uint64_t)1 << ARBEL_QPSNDDB_WQE_CNT_SHIFT) |
	    ((uint64_t)wqe_counter << ARBEL_QPSNDDB_WQE_COUNTER_SHIFT) |
	    ((uint64_t)fence << ARBEL_QPSNDDB_F_SHIFT) |
	    ((uint64_t)nopcode << ARBEL_QPSNDDB_NOPCODE_SHIFT) |
	    (qpn << ARBEL_QPSNDDB_QPN_SHIFT) | nds;
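
	/*
	 * Layout of the doorbell value just built (decoded from the shift
	 * constants above, shown for illustration):
	 *   bit  56        WQE count of 1
	 *   bits [55:40]   wqe_counter
	 *   bit  37        fence
	 *   bits [36:32]   nopcode
	 *   bits [31:8]    qpn
	 *   bits [7:0]     nds (WQE size in 16-byte chunks)
	 */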

	/* Write the doorbell to UAR */
#ifdef _LP64
	((tavor_hw_uar_t *)ia_uar)->send = HTOBE_64(doorbell);
#else
#if defined(i386)
	dapl_os_lock(&g_tavor_uar_lock);
	/*
	 * For 32 bit intel we assign the doorbell in the order
	 * prescribed by the Tavor PRM, lower to upper addresses
	 */
	((tavor_hw_uar32_t *)ia_uar)->send[0] =
	    (uint32_t)HTOBE_32(doorbell >> 32);
	((tavor_hw_uar32_t *)ia_uar)->send[1] =
	    (uint32_t)HTOBE_32(doorbell & 0x00000000ffffffff);
	dapl_os_unlock(&g_tavor_uar_lock);
#else
	dapls_atomic_assign_64(HTOBE_64(doorbell),
	    &((tavor_hw_uar_t *)ia_uar)->send);
#endif
#endif
}

/*
 * dapli_arbel_wqe_send_build()
 * Constructs a WQE for a given ibt_send_wr_t
 */
static int
dapli_arbel_wqe_send_build(ib_qp_handle_t qp, ibt_send_wr_t *wr,
    uint64_t *addr, uint_t *size)
{
	tavor_hw_snd_wqe_remaddr_t	*rc;
	tavor_hw_snd_wqe_bind_t		*bn;
	tavor_hw_wqe_sgl_t		*ds;
	ibt_wr_ds_t			*sgl;
	uint32_t			nds;
	uint32_t			len, total_len;
	uint32_t			new_rkey;
	uint32_t			old_rkey;
	int				i, num_ds;
	int				max_inline_bytes = -1;
	uint64_t			ctrl;

	nds = wr->wr_nds;
	sgl = wr->wr_sgl;
	num_ds = 0;
	ctrl = ((wr->wr_flags & IBT_WR_SEND_SIGNAL) ?
	    ARBEL_WQE_SEND_SIGNALED_MASK : 0) |
	    ((wr->wr_flags & IBT_WR_SEND_SOLICIT) ?
	    ARBEL_WQE_SEND_SOLICIT_MASK : 0) |
	    ARBEL_WQE_CTRL_REQBIT_MASK;

	/*
	 * RC is the only supported transport in UDAPL.
	 * For RC requests, we allow "Send", "RDMA Read", "RDMA Write"
	 * and "Bind".
	 */
	switch (wr->wr_opcode) {
	case IBT_WRC_SEND:
		/*
		 * If this is a Send request, then all we need is
		 * the Data Segment processing below.
		 * Initialize the information for the Data Segments
		 */
		ds = (tavor_hw_wqe_sgl_t *)((uintptr_t)addr +
		    sizeof (tavor_hw_snd_wqe_nextctrl_t));
		if (qp->qp_sq_inline != 0)
			max_inline_bytes =
			    qp->qp_sq_wqesz - TAVOR_INLINE_HEADER_SIZE_SEND;
		break;
	case IBT_WRC_RDMAW:
		if (qp->qp_sq_inline != 0)
			max_inline_bytes =
			    qp->qp_sq_wqesz - TAVOR_INLINE_HEADER_SIZE_RDMAW;
		/* FALLTHROUGH */
	case IBT_WRC_RDMAR:
		if (qp->qp_sq_inline < 0 && wr->wr_opcode == IBT_WRC_RDMAR)
			qp->qp_sq_inline = 0;
		/*
		 * If this is an RDMA Read or RDMA Write request, then fill
		 * in the "Remote Address" header fields.
		 */
		rc = (tavor_hw_snd_wqe_remaddr_t *)((uintptr_t)addr +
		    sizeof (tavor_hw_snd_wqe_nextctrl_t));

		/*
		 * Build the Remote Address Segment for the WQE, using
		 * the information from the RC work request.
		 */
		TAVOR_WQE_BUILD_REMADDR(rc, &wr->wr.rc.rcwr.rdma);

		/* Update "ds" for filling in Data Segments (below) */
		ds = (tavor_hw_wqe_sgl_t *)((uintptr_t)rc +
		    sizeof (tavor_hw_snd_wqe_remaddr_t));
		break;
	case IBT_WRC_BIND:
		/*
		 * Generate a new R_key.
		 * The upper bits hold the MPT index and must be kept
		 * the same; only the lower "unconstrained" key byte is
		 * incremented.
		 */
#if 0
	/* XXX - need equiv of "arbel_wr_bind_check(state, wr);" */
	/* XXX - uses arbel_mr_keycalc - what about Sinai vs. Arbel??? */
#endif
		old_rkey = wr->wr.rc.rcwr.bind->bind_rkey;
		new_rkey = old_rkey >> 8;	/* index */
		old_rkey = ((old_rkey & 0xff) + 1) & 0xff; /* incremented key */
		new_rkey = (new_rkey << 8) | old_rkey;
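		/*
		 * For example (illustration only): an old R_key of
		 * 0x00123456 yields index 0x001234 and key byte 0x56,
		 * so the new R_key becomes 0x00123457.
		 */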

		wr->wr.rc.rcwr.bind->bind_rkey_out = new_rkey;

		bn = (tavor_hw_snd_wqe_bind_t *)((uintptr_t)addr +
		    sizeof (tavor_hw_snd_wqe_nextctrl_t));

		/*
		 * Build the Bind Memory Window Segments for the WQE,
		 * using the information from the RC Bind memory
		 * window work request.
		 */
		TAVOR_WQE_BUILD_BIND(bn, wr->wr.rc.rcwr.bind);

		/*
		 * Update the "ds" pointer.  Even though the "bind"
		 * operation requires no SGLs, this is necessary to
		 * facilitate the correct descriptor size calculations
		 * (below).
		 */
		ds = (tavor_hw_wqe_sgl_t *)((uintptr_t)bn +
		    sizeof (tavor_hw_snd_wqe_bind_t));
		nds = 0;
		break;
	default:
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "dapli_arbel_wqe_send_build: invalid wr_opcode=%d\n",
		    wr->wr_opcode);
		return (DAT_INTERNAL_ERROR);
	}

	/*
	 * Now fill in the Data Segments (SGL) for the Send WQE based on
	 * the values set up above (i.e. "sgl", "nds", and the "ds" pointer).
	 * Start by checking for a valid number of SGL entries.
	 */
	if (nds > qp->qp_sq_sgl) {
		return (DAT_INVALID_PARAMETER);
	}

	/*
	 * For each SGL in the Send Work Request, fill in the Send WQE's data
	 * segments.  Note: We skip any SGL with zero size because Tavor
	 * hardware cannot handle a zero for "byte_cnt" in the WQE.  Actually
	 * the encoding for zero means a 2GB transfer.  Because of this special
	 * encoding in the hardware, we mask the requested length with
	 * TAVOR_WQE_SGL_BYTE_CNT_MASK (so that 2GB will end up encoded as
	 * zero.)
	 */
	if (max_inline_bytes != -1) {		/* compute total_len */
		total_len = 0;
		for (i = 0; i < nds; i++)
			total_len += sgl[i].ds_len;
		if (total_len > max_inline_bytes)
			max_inline_bytes = -1;	/* too big, do not "inline" */
	}
	if (max_inline_bytes != -1) {		/* do "inline" */
		uint8_t *dst = (uint8_t *)((uint32_t *)ds + 1);
		*(uint32_t *)ds =
		    HTOBE_32(total_len | TAVOR_WQE_SGL_INLINE_MASK);
		for (i = 0; i < nds; i++) {
			if ((len = sgl[i].ds_len) == 0) {
				continue;
			}
			(void) dapl_os_memcpy(dst,
			    (void *)(uintptr_t)sgl[i].ds_va, len);
			dst += len;
		}
		/* Return the size of descriptor (in 16-byte chunks) */
		*size = ((uintptr_t)dst - (uintptr_t)addr + 15) >> 4;
	} else {
		for (i = 0; i < nds; i++) {
			if (sgl[i].ds_len == 0) {
				continue;
			}

			/*
			 * Fill in the Data Segment(s) for the current WQE,
			 * using the information contained in the
			 * scatter-gather list of the work request.
			 */
			TAVOR_WQE_BUILD_DATA_SEG(&ds[num_ds], &sgl[i]);
			num_ds++;
		}

		/* Return the size of descriptor (in 16-byte chunks) */
		*size = ((uintptr_t)&ds[num_ds] - (uintptr_t)addr) >> 4;
	}
	ARBEL_WQE_SETCTRL(qp, addr, ctrl);

	return (DAT_SUCCESS);
}

/*
 * dapli_arbel_wqe_send_linknext()
 * Takes a WQE and links it to the prev WQE chain
 */
static void
dapli_arbel_wqe_send_linknext(ibt_send_wr_t *curr_wr,
    uint32_t curr_desc, uint_t curr_descsz, uint64_t *prev_addr,
    tavor_sw_wqe_dbinfo_t *dbinfo)
{
	uint32_t	nopcode, fence, nda_op, ee_nds;

	/*
	 * Calculate the "next" field of the prev descriptor.  This amounts
	 * to setting up the "next_wqe_addr", "nopcode", "fence", and "nds"
	 * fields (see tavor_hw.h for more).
	 */

	/*
	 * Determine the value for the Tavor WQE "nopcode" field
	 * by using the IBTF opcode from the work request
	 */
	switch (curr_wr->wr_opcode) {
	case IBT_WRC_RDMAW:
		nopcode = ARBEL_WQE_SEND_NOPCODE_RDMAW;
		break;

	case IBT_WRC_SEND:
		nopcode = ARBEL_WQE_SEND_NOPCODE_SEND;
		break;

	case IBT_WRC_RDMAR:
		nopcode = ARBEL_WQE_SEND_NOPCODE_RDMAR;
		break;

	case IBT_WRC_BIND:
		nopcode = ARBEL_WQE_SEND_NOPCODE_BIND;
		break;
	default:
		/* Unsupported opcodes in UDAPL */
		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
		    "dapli_arbel_wqe_send_linknext: invalid wr_opcode=%d\n",
		    curr_wr->wr_opcode);
		return;
	}

	fence = (curr_wr->wr_flags & IBT_WR_SEND_FENCE) ? 1 : 0;
	nda_op = ((uintptr_t)curr_desc & ARBEL_WQE_NDA_MASK) | nopcode;
	ee_nds = ((fence == 1) ? ARBEL_WQE_SEND_FENCE_MASK : 0) |
	    (curr_descsz & ARBEL_WQE_NDS_MASK) |
	    ARBEL_WQE_NEXT_REQBIT_MASK;
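
	/*
	 * Decoded from the masks above (for illustration): "nda_op" holds
	 * the next descriptor address in bits [31:6] (64-byte aligned) with
	 * the nopcode in its low 6 bits; "ee_nds" holds the DBD request bit
	 * (0x80), the fence bit (0x40) and the descriptor size in its low
	 * 6 bits.
	 */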

	/*
	 * A send queue doorbell will be rung for the next
	 * WQE on the chain, set the current WQE's "dbd" bit.
	 * Note: We also update the "dbinfo" structure here to pass
	 * back information about what should (later) be included
	 * in the send queue doorbell.
	 */
	dbinfo->db_nopcode = nopcode;
	dbinfo->db_fence   = fence;

	ARBEL_WQE_SETNEXT(qp, prev_addr, nda_op, ee_nds);
}


/*
 * dapli_arbel_wqe_recv_build()
 * Builds the recv WQE for a given ibt_recv_wr_t
 */
static DAT_RETURN
dapli_arbel_wqe_recv_build(ib_qp_handle_t qp, ibt_recv_wr_t *wr,
    uint64_t *addr, uint_t *size)
{
	tavor_hw_wqe_sgl_t	*ds;
	int			i;
	int			num_ds;

	/* Fill in the Data Segments (SGL) for the Recv WQE */
	ds = (tavor_hw_wqe_sgl_t *)((uintptr_t)addr +
	    sizeof (tavor_hw_rcv_wqe_nextctrl_t));
	num_ds = 0;

	/* Check for valid number of SGL entries */
	if (wr->wr_nds > qp->qp_rq_sgl) {
		return (DAT_INVALID_PARAMETER);
	}

	/*
	 * For each SGL in the Recv Work Request, fill in the Recv WQE's data
	 * segments.  Note: We skip any SGL with zero size because Tavor
	 * hardware cannot handle a zero for "byte_cnt" in the WQE.  Actually
	 * the encoding for zero means a 2GB transfer.  Because of this special
	 * encoding in the hardware, we mask the requested length with
	 * TAVOR_WQE_SGL_BYTE_CNT_MASK (so that 2GB will end up encoded as
	 * zero.)
	 */
	for (i = 0; i < wr->wr_nds; i++) {
		if (wr->wr_sgl[i].ds_len == 0) {
			continue;
		}

		/*
		 * Fill in the Data Segment(s) for the receive WQE, using the
		 * information contained in the scatter-gather list of the
		 * work request.
		 */
		TAVOR_WQE_BUILD_DATA_SEG(&ds[num_ds], &wr->wr_sgl[i]);
		num_ds++;
	}
	if (i < qp->qp_rq_sgl) {
		ibt_wr_ds_t sgl;
		sgl.ds_va  = (ib_vaddr_t)0;
		sgl.ds_len = (ib_msglen_t)0;
		sgl.ds_key = (ibt_lkey_t)ARBEL_WQE_SGL_INVALID_LKEY;
		TAVOR_WQE_BUILD_DATA_SEG(&ds[num_ds], &sgl);
	}

	/* Return the size of descriptor (in 16-byte chunks) */
	*size = qp->qp_rq_wqesz >> 4;

	return (DAT_SUCCESS);
}

/*
 * dapli_arbel_wqe_srq_build()
 * Builds the recv WQE for a given ibt_recv_wr_t on a shared receive
 * queue (SRQ)
 */
static DAT_RETURN
dapli_arbel_wqe_srq_build(ib_srq_handle_t srq, ibt_recv_wr_t *wr,
    uint64_t *addr)
{
	tavor_hw_wqe_sgl_t	*ds;
	ibt_wr_ds_t		end_sgl;
	int			i;
	int			num_ds;

	/* Fill in the Data Segments (SGL) for the Recv WQE */
	ds = (tavor_hw_wqe_sgl_t *)((uintptr_t)addr +
	    sizeof (tavor_hw_rcv_wqe_nextctrl_t));
	num_ds = 0;

	/* Check for valid number of SGL entries */
	if (wr->wr_nds > srq->srq_wq_sgl) {
		return (DAT_INVALID_PARAMETER);
	}

	/*
	 * For each SGL in the Recv Work Request, fill in the Recv WQE's data
	 * segments.  Note: We skip any SGL with zero size because Tavor
	 * hardware cannot handle a zero for "byte_cnt" in the WQE.  Actually
	 * the encoding for zero means a 2GB transfer.  Because of this special
	 * encoding in the hardware, we mask the requested length with
	 * TAVOR_WQE_SGL_BYTE_CNT_MASK (so that 2GB will end up encoded as
	 * zero.)
	 */
	for (i = 0; i < wr->wr_nds; i++) {
		if (wr->wr_sgl[i].ds_len == 0) {
			continue;
		}

		/*
		 * Fill in the Data Segment(s) for the receive WQE, using the
		 * information contained in the scatter-gather list of the
		 * work request.
		 */
		TAVOR_WQE_BUILD_DATA_SEG(&ds[num_ds], &wr->wr_sgl[i]);
		num_ds++;
	}

	/*
	 * For SRQ, if the number of data segments is less than the maximum
	 * specified at alloc, then we have to fill in a special "key" entry in
	 * the sgl entry after the last valid one in this post request.  We do
	 * that here.
	 */
	if (num_ds < srq->srq_wq_sgl) {
		end_sgl.ds_va  = (ib_vaddr_t)0;
		end_sgl.ds_len = (ib_msglen_t)0;
		end_sgl.ds_key = (ibt_lkey_t)ARBEL_WQE_SGL_INVALID_LKEY;
		TAVOR_WQE_BUILD_DATA_SEG(&ds[num_ds], &end_sgl);
	}

	return (DAT_SUCCESS);
}

/*
 * dapli_arbel_cq_peek()
 * Peeks into a given CQ to check if there are any events that can be
 * polled. It returns the number of CQEs that can be polled.
 */
static void
dapli_arbel_cq_peek(ib_cq_handle_t cq, int *num_cqe)
{
	tavor_hw_cqe_t		*cqe;
	uint32_t		imm_eth_pkey_cred;
	uint32_t		cons_indx;
	uint32_t		wrap_around_mask;
	uint32_t		polled_cnt;
	uint_t			doorbell_cnt;
	uint_t			opcode;

	/* Get the consumer index */
	cons_indx = cq->cq_consindx;

	/*
	 * Calculate the wrap around mask.  Note: This operation only works
	 * because all Tavor completion queues have power-of-2 sizes
	 */
	wrap_around_mask = (cq->cq_size - 1);

	/* Calculate the pointer to the first CQ entry */
	cqe = &cq->cq_addr[cons_indx];

	/*
	 * Count entries in the CQ until we find an entry owned by
	 * the hardware.
	 */
	polled_cnt = 0;
	while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
		opcode = TAVOR_CQE_OPCODE_GET(cqe);
		/* Error CQEs map to multiple work completions */
		if ((opcode == TAVOR_CQE_SEND_ERR_OPCODE) ||
		    (opcode == TAVOR_CQE_RECV_ERR_OPCODE)) {
			imm_eth_pkey_cred =
			    TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);
			doorbell_cnt =
			    imm_eth_pkey_cred & TAVOR_CQE_ERR_DBDCNT_MASK;
			polled_cnt += (doorbell_cnt + 1);
		} else {
			polled_cnt++;
		}
		/* Increment the consumer index */
		cons_indx = (cons_indx + 1) & wrap_around_mask;

		/* Update the pointer to the next CQ entry */
		cqe = &cq->cq_addr[cons_indx];
	}

	*num_cqe = polled_cnt;
}

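/*
 * dapli_arbel_cq_update_ci()
 * Writes the current CQ consumer index into the CQ's software doorbell
 * record
 */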
#define	dapli_arbel_cq_update_ci(cq, dbp) \
	(dbp)[0] = HTOBE_32(cq->cq_consindx)

/*
 * dapli_arbel_cq_poll()
 * This routine polls CQEs out of a CQ and puts them into the ibt_wc_t
 * array that is passed in.
 */
static DAT_RETURN
dapli_arbel_cq_poll(ib_cq_handle_t cq, ibt_wc_t *wc_p, uint_t num_wc,
    uint_t *num_polled)
{
	tavor_hw_cqe_t		*cqe;
	uint32_t		cons_indx;
	uint32_t		wrap_around_mask;
	uint32_t		polled_cnt;
	DAT_RETURN		dat_status;
	int			status;

	/* Get the consumer index */
	cons_indx = cq->cq_consindx;

	/*
	 * Calculate the wrap around mask.  Note: This operation only works
	 * because all Tavor completion queues have power-of-2 sizes
	 */
	wrap_around_mask = (cq->cq_size - 1);

	/* Calculate the pointer to the first CQ entry */
	cqe = &cq->cq_addr[cons_indx];

	/*
	 * Keep pulling entries from the CQ until we find an entry owned by
	 * the hardware.  As long as the CQEs are owned by SW, process
	 * each entry by calling dapli_arbel_cq_cqe_consume() and updating the
	 * CQ consumer index.  Note:  We only update the consumer index if
	 * dapli_arbel_cq_cqe_consume() returns TAVOR_CQ_SYNC_AND_DB.
	 * Otherwise, it indicates that we are going to "recycle" the CQE
	 * (probably because it is an error CQE that corresponds to more than
	 * one completion).
	 */
	polled_cnt = 0;
	while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
		status = dapli_arbel_cq_cqe_consume(cq, cqe,
		    &wc_p[polled_cnt++]);
		if (status == TAVOR_CQ_SYNC_AND_DB) {
			/* Reset entry to hardware ownership */
			TAVOR_CQE_OWNER_SET_HW(cqe);

			/* Increment the consumer index */
			cons_indx = (cons_indx + 1) & wrap_around_mask;

			/* Update the pointer to the next CQ entry */
			cqe = &cq->cq_addr[cons_indx];
		}

		/*
		 * If we have run out of space to store work completions,
		 * then stop and return the ones we have pulled off the CQ.
		 */
		if (polled_cnt >= num_wc) {
			break;
		}
	}

	dat_status = DAT_SUCCESS;
	/*
	 * Now we only ring the doorbell (to update the consumer index) if
	 * we've actually consumed a CQ entry.  If we have, for example,
	 * pulled from a CQE that we are still in the process of "recycling"
	 * for error purposes, then we would not update the consumer index.
	 */
	if ((polled_cnt != 0) && (cq->cq_consindx != cons_indx)) {
		/*
		 * Update the consumer index in both the CQ handle and the
		 * doorbell record.
		 */
		cq->cq_consindx = cons_indx;
		dapli_arbel_cq_update_ci(cq, cq->cq_poll_dbp);
	} else if (polled_cnt == 0) {
		/*
		 * If the CQ is empty, we can try to free up some of the WRID
		 * list containers.
		 */
		if (cq->cq_wrid_reap_head)	/* look before leaping */
			dapls_tavor_wrid_cq_reap(cq);
		dat_status = DAT_ERROR(DAT_QUEUE_EMPTY, 0);
	}

	if (num_polled != NULL) {
		*num_polled = polled_cnt;
	}

	return (dat_status);
}

/*
 * dapli_arbel_cq_poll_one()
 * This routine polls one CQE out of a CQ and puts it into the ibt_wc_t
 * that is passed in.  See above for more comments/details.
 */
static DAT_RETURN
dapli_arbel_cq_poll_one(ib_cq_handle_t cq, ibt_wc_t *wc_p)
{
	tavor_hw_cqe_t		*cqe;
	uint32_t		cons_indx;
	DAT_RETURN		dat_status;
	int			status;

	/* Get the consumer index */
	cons_indx = cq->cq_consindx;

	/* Calculate the pointer to the first CQ entry */
	cqe = &cq->cq_addr[cons_indx];

	/*
	 * Keep pulling entries from the CQ until we find an entry owned by
	 * the hardware.  As long as the CQEs are owned by SW, process
	 * each entry by calling dapli_arbel_cq_cqe_consume() and updating the
	 * CQ consumer index.  Note:  We only update the consumer index if
	 * dapli_arbel_cq_cqe_consume() returns TAVOR_CQ_SYNC_AND_DB.
	 * Otherwise, it indicates that we are going to "recycle" the CQE
	 * (probably because it is an error CQE that corresponds to more than
	 * one completion).
	 */
	if (TAVOR_CQE_OWNER_IS_SW(cqe)) {
		status = dapli_arbel_cq_cqe_consume(cq, cqe, wc_p);
		if (status == TAVOR_CQ_SYNC_AND_DB) {
			/* Reset entry to hardware ownership */
			TAVOR_CQE_OWNER_SET_HW(cqe);

			/* Increment the consumer index */
			cq->cq_consindx =
			    (cons_indx + 1) & (cq->cq_size - 1);
			dapli_arbel_cq_update_ci(cq, cq->cq_poll_dbp);
		}
		dat_status = DAT_SUCCESS;
	} else {
		if (cq->cq_wrid_reap_head)	/* look before leaping */
			dapls_tavor_wrid_cq_reap(cq);
		dat_status = DAT_ERROR(DAT_QUEUE_EMPTY, 0);
	}
	return (dat_status);
}

/*
 * dapli_arbel_cq_cqe_consume()
 * Converts a given CQE into an ibt_wc_t object
 */
static int
dapli_arbel_cq_cqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe,
    ibt_wc_t *wc)
{
	uint_t		flags;
	uint_t		type;
	uint_t		opcode;
	int		status;

	/* strip off the size in wqeaddrsz */
	TAVOR_CQE_WQEADDRSZ_SET(cqe, TAVOR_CQE_WQEADDRSZ_GET(cqe) &
	    ~ARBEL_WQE_NDS_MASK);

	/*
	 * Determine if this is an "error" CQE by examining "opcode".  If it
	 * is an error CQE, then call dapli_arbel_cq_errcqe_consume() and return
	 * whatever status it returns.  Otherwise, this is a successful
	 * completion.
	 */
	opcode = TAVOR_CQE_OPCODE_GET(cqe);
	if ((opcode == TAVOR_CQE_SEND_ERR_OPCODE) ||
	    (opcode == TAVOR_CQE_RECV_ERR_OPCODE)) {
		status = dapli_arbel_cq_errcqe_consume(cqhdl, cqe, wc);
		return (status);
	}

	/*
	 * Fetch the Work Request ID using the information in the CQE.
	 * See tavor_wr.c for more details.
	 */
	wc->wc_id = dapls_tavor_wrid_get_entry(cqhdl, cqe,
	    TAVOR_CQE_SENDRECV_GET(cqe), 0, NULL);
	wc->wc_qpn = TAVOR_CQE_QPNUM_GET(cqe);

	/*
	 * Parse the CQE opcode to determine completion type.  This will set
	 * not only the type of the completion, but also any flags that might
	 * be associated with it (e.g. whether immediate data is present).
	 */
	flags = IBT_WC_NO_FLAGS;
	if (TAVOR_CQE_SENDRECV_GET(cqe) != TAVOR_COMPLETION_RECV) {

		/*
		 * Send CQE
		 *
		 * The following opcodes will not be generated in uDAPL
		 * case TAVOR_CQE_SND_RDMAWR_IMM:
		 * case TAVOR_CQE_SND_SEND_IMM:
		 * case TAVOR_CQE_SND_ATOMIC_CS:
		 * case TAVOR_CQE_SND_ATOMIC_FA:
		 */
		switch (opcode) {
		case TAVOR_CQE_SND_RDMAWR:
			type = IBT_WRC_RDMAW;
			break;

		case TAVOR_CQE_SND_SEND:
			type = IBT_WRC_SEND;
			break;

		case TAVOR_CQE_SND_RDMARD:
			type = IBT_WRC_RDMAR;
			wc->wc_bytes_xfer = TAVOR_CQE_BYTECNT_GET(cqe);
			break;

		case TAVOR_CQE_SND_BIND_MW:
			type = IBT_WRC_BIND;
			break;

		default:
			wc->wc_status = IBT_WC_LOCAL_CHAN_OP_ERR;
			return (TAVOR_CQ_SYNC_AND_DB);
		}
	} else {

		/*
		 * Receive CQE
		 *
		 * The following opcodes will not be generated in uDAPL
		 *
		 * case TAVOR_CQE_RCV_RECV_IMM:
		 * case TAVOR_CQE_RCV_RECV_IMM2:
		 * case TAVOR_CQE_RCV_RDMAWR_IMM:
		 * case TAVOR_CQE_RCV_RDMAWR_IMM2:
		 */
		switch (opcode & 0x1F) {
		case TAVOR_CQE_RCV_RECV:
			/* FALLTHROUGH */
		case TAVOR_CQE_RCV_RECV2:
			type = IBT_WRC_RECV;
			wc->wc_bytes_xfer = TAVOR_CQE_BYTECNT_GET(cqe);
			break;
		default:
			wc->wc_status = IBT_WC_LOCAL_CHAN_OP_ERR;
			return (TAVOR_CQ_SYNC_AND_DB);
		}
	}
	wc->wc_type = type;
	wc->wc_flags = flags;
	/* If we got here, completion status must be success */
	wc->wc_status = IBT_WC_SUCCESS;

	return (TAVOR_CQ_SYNC_AND_DB);
}


/*
 * dapli_arbel_cq_errcqe_consume()
 */
static int
dapli_arbel_cq_errcqe_consume(ib_cq_handle_t cqhdl, tavor_hw_cqe_t *cqe,
    ibt_wc_t *wc)
{
	dapls_tavor_wrid_entry_t	wre;
	uint32_t		imm_eth_pkey_cred;
	uint_t			status;
	uint_t			opcode = TAVOR_CQE_OPCODE_GET(cqe);

	dapl_dbg_log(DAPL_DBG_TYPE_EVD, "errcqe_consume:cqe.eth=%x, wqe=%x\n",
	    TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe),
	    TAVOR_CQE_WQEADDRSZ_GET(cqe));

	/*
	 * Fetch the Work Request ID using the information in the CQE.
	 * See tavor_wr.c for more details.
	 */
	wc->wc_id = dapls_tavor_wrid_get_entry(cqhdl, cqe,
	    (opcode == TAVOR_CQE_SEND_ERR_OPCODE) ? TAVOR_COMPLETION_SEND :
	    TAVOR_COMPLETION_RECV, 1, &wre);
	wc->wc_qpn = TAVOR_CQE_QPNUM_GET(cqe);

	/*
	 * Parse the CQE opcode to determine completion type.  We know that
	 * the CQE is an error completion, so we extract only the completion
	 * status here.
	 */
	imm_eth_pkey_cred = TAVOR_CQE_IMM_ETH_PKEY_CRED_GET(cqe);
	status = imm_eth_pkey_cred >> TAVOR_CQE_ERR_STATUS_SHIFT;
	switch (status) {
	case TAVOR_CQE_LOC_LEN_ERR:
		status = IBT_WC_LOCAL_LEN_ERR;
		break;

	case TAVOR_CQE_LOC_OP_ERR:
		status = IBT_WC_LOCAL_CHAN_OP_ERR;
		break;

	case TAVOR_CQE_LOC_PROT_ERR:
		status = IBT_WC_LOCAL_PROTECT_ERR;
		break;

	case TAVOR_CQE_WR_FLUSHED_ERR:
		status = IBT_WC_WR_FLUSHED_ERR;
		break;

	case TAVOR_CQE_MW_BIND_ERR:
		status = IBT_WC_MEM_WIN_BIND_ERR;
		break;

	case TAVOR_CQE_BAD_RESPONSE_ERR:
		status = IBT_WC_BAD_RESPONSE_ERR;
		break;

	case TAVOR_CQE_LOCAL_ACCESS_ERR:
		status = IBT_WC_LOCAL_ACCESS_ERR;
		break;

	case TAVOR_CQE_REM_INV_REQ_ERR:
		status = IBT_WC_REMOTE_INVALID_REQ_ERR;
		break;

	case TAVOR_CQE_REM_ACC_ERR:
		status = IBT_WC_REMOTE_ACCESS_ERR;
		break;

	case TAVOR_CQE_REM_OP_ERR:
		status = IBT_WC_REMOTE_OP_ERR;
		break;

	case TAVOR_CQE_TRANS_TO_ERR:
		status = IBT_WC_TRANS_TIMEOUT_ERR;
		break;

	case TAVOR_CQE_RNRNAK_TO_ERR:
		status = IBT_WC_RNR_NAK_TIMEOUT_ERR;
		break;

	/*
	 * The following error codes are not supported in the Tavor driver
	 * as they relate only to Reliable Datagram completion statuses:
	 *    case TAVOR_CQE_LOCAL_RDD_VIO_ERR:
	 *    case TAVOR_CQE_REM_INV_RD_REQ_ERR:
	 *    case TAVOR_CQE_EEC_REM_ABORTED_ERR:
	 *    case TAVOR_CQE_INV_EEC_NUM_ERR:
	 *    case TAVOR_CQE_INV_EEC_STATE_ERR:
	 *    case TAVOR_CQE_LOC_EEC_ERR:
	 */

	default:
		status = IBT_WC_LOCAL_CHAN_OP_ERR;
		break;
	}
	wc->wc_status = status;
	wc->wc_type = 0;

	/*
	 * Consume the CQE
	 *    Return status to indicate that doorbell and sync may be
	 *    necessary.
	 */
	return (TAVOR_CQ_SYNC_AND_DB);
}

/*
 * dapli_arbel_cq_notify()
 * This function is used for arming the CQ by ringing the CQ doorbell.
 *
 * Note: there is something very subtle here.  This code assumes a very
 * specific behavior of the kernel driver.  The cmd_sn field of the
 * arm_dbr is updated by the kernel driver whenever a notification
 * event for the cq is received.  This code extracts the cmd_sn field
 * from the arm_dbr to know the right value to use.  The arm_dbr is
 * always updated atomically so that neither the kernel driver nor this
 * will get confused about what the other is doing.
 *
 * Note: param is not used here.  It is necessary for arming a CQ for
 * N completions (param is N), but no uDAPL API supports this for now.
 * Thus, we declare ARGSUSED to make lint happy.
 */
/*ARGSUSED*/
static DAT_RETURN
dapli_arbel_cq_notify(ib_cq_handle_t cq, int flags, uint32_t param)
{
	uint32_t	cqnum;
	uint32_t	*target;
	uint32_t	old_cmd, cmp, new, tmp, cmd_sn;

	/*
	 * Determine if we are trying to get the next completion or the next
	 * "solicited" completion.  Then hit the appropriate doorbell.
	 */
	dapli_arbel_cq_update_ci(cq, cq->cq_arm_dbp);
	cqnum = cq->cq_num;
	target = cq->cq_arm_dbp + 1;
retry:
	cmp = *target;
	tmp = HTOBE_32(cmp);
	old_cmd = tmp & 0x7;
	cmd_sn = (tmp & 0x18) >> 3;
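	/*
	 * In the second word of the arm doorbell record (decoded above),
	 * bits [2:0] hold the currently armed command and bits [4:3] hold
	 * the command sequence number maintained by the kernel driver (see
	 * the note above).
	 */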
1000*9e39c5baSBill Taylor 
1001*9e39c5baSBill Taylor 	if (flags == IB_NOTIFY_ON_NEXT_COMP) {
1002*9e39c5baSBill Taylor 		if (old_cmd != ARBEL_CQDB_NOTIFY_CQ) {
1003*9e39c5baSBill Taylor 			new = HTOBE_32((tmp & ~0x7) | ARBEL_CQDB_NOTIFY_CQ);
1004*9e39c5baSBill Taylor 			tmp = atomic_cas_32(target, cmp, new);
1005*9e39c5baSBill Taylor 			if (tmp != cmp)
1006*9e39c5baSBill Taylor 				goto retry;
1007*9e39c5baSBill Taylor 			dapli_arbel_cq_doorbell(cq->cq_iauar,
1008*9e39c5baSBill Taylor 			    ARBEL_CQDB_NOTIFY_CQ, cqnum,
1009*9e39c5baSBill Taylor 			    cmd_sn, cq->cq_consindx);
1010*9e39c5baSBill Taylor 		} /* else it's already armed */
1011*9e39c5baSBill Taylor 	} else if (flags == IB_NOTIFY_ON_NEXT_SOLICITED) {
1012*9e39c5baSBill Taylor 		if (old_cmd != ARBEL_CQDB_NOTIFY_CQ &&
1013*9e39c5baSBill Taylor 		    old_cmd != ARBEL_CQDB_NOTIFY_CQ_SOLICIT) {
1014*9e39c5baSBill Taylor 			new = HTOBE_32((tmp & ~0x7) |
1015*9e39c5baSBill Taylor 			    ARBEL_CQDB_NOTIFY_CQ_SOLICIT);
1016*9e39c5baSBill Taylor 			tmp = atomic_cas_32(target, cmp, new);
1017*9e39c5baSBill Taylor 			if (tmp != cmp)
1018*9e39c5baSBill Taylor 				goto retry;
1019*9e39c5baSBill Taylor 			dapli_arbel_cq_doorbell(cq->cq_iauar,
1020*9e39c5baSBill Taylor 			    ARBEL_CQDB_NOTIFY_CQ_SOLICIT, cqnum,
1021*9e39c5baSBill Taylor 			    cmd_sn, cq->cq_consindx);
1022*9e39c5baSBill Taylor 		} /* else it's already armed */
1023*9e39c5baSBill Taylor 	} else {
1024*9e39c5baSBill Taylor 		return (DAT_INVALID_PARAMETER);
1025*9e39c5baSBill Taylor 	}
1026*9e39c5baSBill Taylor 
1027*9e39c5baSBill Taylor 	return (DAT_SUCCESS);
1028*9e39c5baSBill Taylor }
1029*9e39c5baSBill Taylor 
1030*9e39c5baSBill Taylor /*
1031*9e39c5baSBill Taylor  * dapli_arbel_post_send()
1032*9e39c5baSBill Taylor  */
1033*9e39c5baSBill Taylor /* ARGSUSED */
1034*9e39c5baSBill Taylor static DAT_RETURN
dapli_arbel_post_send(DAPL_EP * ep,ibt_send_wr_t * wr,boolean_t ns)1035*9e39c5baSBill Taylor dapli_arbel_post_send(DAPL_EP *ep, ibt_send_wr_t *wr, boolean_t ns)
1036*9e39c5baSBill Taylor {
1037*9e39c5baSBill Taylor 	tavor_sw_wqe_dbinfo_t		dbinfo;
1038*9e39c5baSBill Taylor 	dapls_tavor_wrid_list_hdr_t	*wridlist;
1039*9e39c5baSBill Taylor 	dapls_tavor_wrid_entry_t	*wre_last;
1040*9e39c5baSBill Taylor 	uint32_t			desc;
1041*9e39c5baSBill Taylor 	uint64_t			*wqe_addr;
1042*9e39c5baSBill Taylor 	uint32_t			desc_sz;
1043*9e39c5baSBill Taylor 	uint32_t			wqeaddrsz, signaled_dbd;
1044*9e39c5baSBill Taylor 	uint32_t			head, tail, next_tail, qsize_msk;
1045*9e39c5baSBill Taylor 	int				status;
1046*9e39c5baSBill Taylor 	ib_qp_handle_t			qp;
1047*9e39c5baSBill Taylor 
1048*9e39c5baSBill Taylor 	if ((ep->qp_state == IBT_STATE_RESET) ||
1049*9e39c5baSBill Taylor 	    (ep->qp_state == IBT_STATE_INIT) ||
1050*9e39c5baSBill Taylor 	    (ep->qp_state == IBT_STATE_RTR)) {
1051*9e39c5baSBill Taylor 		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
1052*9e39c5baSBill Taylor 		    "post_send: invalid qp_state %d\n", ep->qp_state);
1053*9e39c5baSBill Taylor 		return (DAT_INVALID_STATE);
1054*9e39c5baSBill Taylor 	}
1055*9e39c5baSBill Taylor 
1056*9e39c5baSBill Taylor 	qp = ep->qp_handle;
1057*9e39c5baSBill Taylor 
1058*9e39c5baSBill Taylor 	/* Grab the lock for the WRID list */
1059*9e39c5baSBill Taylor 	dapl_os_lock(&qp->qp_sq_wqhdr->wq_wrid_lock->wrl_lock);
1060*9e39c5baSBill Taylor 	wridlist  = qp->qp_sq_wqhdr->wq_wrid_post;
1061*9e39c5baSBill Taylor 
1062*9e39c5baSBill Taylor 	/* Save away some initial QP state */
1063*9e39c5baSBill Taylor 	qsize_msk = qp->qp_sq_wqhdr->wq_size - 1;
1064*9e39c5baSBill Taylor 	tail	  = qp->qp_sq_wqhdr->wq_tail;
1065*9e39c5baSBill Taylor 	head	  = qp->qp_sq_wqhdr->wq_head;
1066*9e39c5baSBill Taylor 
1067*9e39c5baSBill Taylor 	/*
1068*9e39c5baSBill Taylor 	 * Check for "queue full" condition.  If the queue is already full,
1069*9e39c5baSBill Taylor 	 * then no more WQEs can be posted; return an error.
1070*9e39c5baSBill Taylor 	 */
1071*9e39c5baSBill Taylor 	if (qp->qp_sq_wqhdr->wq_full != 0) {
1072*9e39c5baSBill Taylor 		dapl_os_unlock(&qp->qp_sq_wqhdr->wq_wrid_lock->wrl_lock);
1073*9e39c5baSBill Taylor 		return (DAT_INSUFFICIENT_RESOURCES);
1074*9e39c5baSBill Taylor 	}
1075*9e39c5baSBill Taylor 
1076*9e39c5baSBill Taylor 	/*
1077*9e39c5baSBill Taylor 	 * Increment the "tail index" and check for "queue full" condition.
1078*9e39c5baSBill Taylor 	 * If we detect that the current work request is going to fill the
1079*9e39c5baSBill Taylor 	 * work queue, then we mark this condition and continue.
1080*9e39c5baSBill Taylor 	 */
1081*9e39c5baSBill Taylor 	next_tail = (tail + 1) & qsize_msk;
1082*9e39c5baSBill Taylor 	if (next_tail == head) {
1083*9e39c5baSBill Taylor 		qp->qp_sq_wqhdr->wq_full = 1;
1084*9e39c5baSBill Taylor 	}
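	/*
	 * Illustrative example: with wq_size = 8, qsize_msk is 0x7, so a
	 * tail of 7 wraps next_tail back to 0.  If head is also 0, this
	 * post consumes the final free slot, wq_full is set, and the next
	 * call fails above with DAT_INSUFFICIENT_RESOURCES.
	 */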
1085*9e39c5baSBill Taylor 
1086*9e39c5baSBill Taylor 	/*
1087*9e39c5baSBill Taylor 	 * Get the user virtual address of the location where the next
1088*9e39c5baSBill Taylor 	 * Send WQE should be built
1089*9e39c5baSBill Taylor 	 */
1090*9e39c5baSBill Taylor 	wqe_addr = TAVOR_QP_SQ_ENTRY(qp, tail);
1091*9e39c5baSBill Taylor 
1092*9e39c5baSBill Taylor 	/*
1093*9e39c5baSBill Taylor 	 * Call dapli_arbel_wqe_send_build() to build the WQE at the given
1094*9e39c5baSBill Taylor 	 * address.  This routine uses the information in the ibt_send_wr_t
1095*9e39c5baSBill Taylor 	 * and returns the size of the WQE through 'desc_sz'.
1096*9e39c5baSBill Taylor 	 */
1097*9e39c5baSBill Taylor 	status = dapli_arbel_wqe_send_build(qp, wr, wqe_addr, &desc_sz);
1098*9e39c5baSBill Taylor 	if (status != DAT_SUCCESS) {
1099*9e39c5baSBill Taylor 		dapl_os_unlock(&qp->qp_sq_wqhdr->wq_wrid_lock->wrl_lock);
1100*9e39c5baSBill Taylor 		return (status);
1101*9e39c5baSBill Taylor 	}
1102*9e39c5baSBill Taylor 
1103*9e39c5baSBill Taylor 	/*
1104*9e39c5baSBill Taylor 	 * Get the descriptor (IO address) corresponding to the location
1105*9e39c5baSBill Taylor 	 * where the Send WQE was just built.
1106*9e39c5baSBill Taylor 	 */
1107*9e39c5baSBill Taylor 	desc = TAVOR_QP_SQ_DESC(qp, tail);
1108*9e39c5baSBill Taylor 
1109*9e39c5baSBill Taylor 	dapl_os_assert(desc >= qp->qp_sq_desc_addr &&
1110*9e39c5baSBill Taylor 	    desc <= (qp->qp_sq_desc_addr +
1111*9e39c5baSBill Taylor 	    qp->qp_sq_numwqe*qp->qp_sq_wqesz));
1112*9e39c5baSBill Taylor 
1113*9e39c5baSBill Taylor 	/*
1114*9e39c5baSBill Taylor 	 * Add a WRID entry to the WRID list.  Need to calculate the
1115*9e39c5baSBill Taylor 	 * "wqeaddr" to pass to dapli_tavor_wrid_add_entry().
1116*9e39c5baSBill Taylor 	 * signaled_dbd is still calculated, but ignored.
1117*9e39c5baSBill Taylor 	 */
1118*9e39c5baSBill Taylor 	wqeaddrsz = TAVOR_QP_WQEADDRSZ(desc, 0);
1119*9e39c5baSBill Taylor 
1120*9e39c5baSBill Taylor 	signaled_dbd = 0;
1121*9e39c5baSBill Taylor 	if (wr->wr_flags & IBT_WR_SEND_SIGNAL)
1122*9e39c5baSBill Taylor 		signaled_dbd = TAVOR_WRID_ENTRY_SIGNALED;
1123*9e39c5baSBill Taylor 
1124*9e39c5baSBill Taylor 	dapli_tavor_wrid_add_entry(qp->qp_sq_wqhdr, wr->wr_id, wqeaddrsz,
1125*9e39c5baSBill Taylor 	    signaled_dbd);
1126*9e39c5baSBill Taylor 
1127*9e39c5baSBill Taylor 	/*
1128*9e39c5baSBill Taylor 	 * Now link the wqe to the old chain (if there was one)
1129*9e39c5baSBill Taylor 	 */
1130*9e39c5baSBill Taylor 	dapli_arbel_wqe_send_linknext(wr, desc, desc_sz,
1131*9e39c5baSBill Taylor 	    qp->qp_sq_lastwqeaddr, &dbinfo);
1132*9e39c5baSBill Taylor 
1133*9e39c5baSBill Taylor 	/*
1134*9e39c5baSBill Taylor 	 * Now if the WRID tail entry is non-NULL, then this
1135*9e39c5baSBill Taylor 	 * represents the entry to which we are chaining the
1136*9e39c5baSBill Taylor 	 * new entries.  Since we are going to ring the
1137*9e39c5baSBill Taylor 	 * doorbell for this WQE, we want to set its "dbd" bit.
1138*9e39c5baSBill Taylor 	 *
1139*9e39c5baSBill Taylor 	 * On the other hand, if the tail is NULL, even though
1140*9e39c5baSBill Taylor 	 * we will have rung the doorbell for the previous WQE
1141*9e39c5baSBill Taylor 	 * (for the hardware's sake) it is irrelevant to our
1142*9e39c5baSBill Taylor 	 * purposes (for tracking WRIDs) because we know the
1143*9e39c5baSBill Taylor 	 * request must have already completed.
1144*9e39c5baSBill Taylor 	 */
1145*9e39c5baSBill Taylor 	wre_last = wridlist->wl_wre_old_tail;
1146*9e39c5baSBill Taylor 	if (wre_last != NULL) {
1147*9e39c5baSBill Taylor 		wre_last->wr_signaled_dbd |= TAVOR_WRID_ENTRY_DOORBELLED;
1148*9e39c5baSBill Taylor 	}
1149*9e39c5baSBill Taylor 
1150*9e39c5baSBill Taylor 	/* Update some of the state in the QP */
1151*9e39c5baSBill Taylor 	qp->qp_sq_lastwqeaddr	 = wqe_addr;
1152*9e39c5baSBill Taylor 	qp->qp_sq_wqhdr->wq_tail = next_tail;
1153*9e39c5baSBill Taylor 
1154*9e39c5baSBill Taylor 	/* Set the doorbell record */
1155*9e39c5baSBill Taylor 	dapli_arbel_sq_dbrec(qp, qp->qp_sq_counter);
1156*9e39c5baSBill Taylor 
1157*9e39c5baSBill Taylor 	/* Ring the doorbell */
1158*9e39c5baSBill Taylor 	dapli_arbel_sq_dbreg(qp->qp_iauar, qp->qp_num, dbinfo.db_fence,
1159*9e39c5baSBill Taylor 	    dbinfo.db_nopcode, qp->qp_sq_counter, desc_sz);
1160*9e39c5baSBill Taylor 	qp->qp_sq_counter++;
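	/*
	 * Note that qp_sq_counter is written into the doorbell record and
	 * the doorbell register before being incremented, so the value
	 * associated with this WQE is the pre-increment count.
	 */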
1161*9e39c5baSBill Taylor 
1162*9e39c5baSBill Taylor 	dapl_os_unlock(&qp->qp_sq_wqhdr->wq_wrid_lock->wrl_lock);
1163*9e39c5baSBill Taylor 
1164*9e39c5baSBill Taylor 	return (DAT_SUCCESS);
1165*9e39c5baSBill Taylor }
1166*9e39c5baSBill Taylor 
1167*9e39c5baSBill Taylor /*
1168*9e39c5baSBill Taylor  * dapli_arbel_post_recv()
1169*9e39c5baSBill Taylor  */
1170*9e39c5baSBill Taylor /* ARGSUSED */
1171*9e39c5baSBill Taylor static DAT_RETURN
1172*9e39c5baSBill Taylor dapli_arbel_post_recv(DAPL_EP *ep, ibt_recv_wr_t *wr, boolean_t ns)
1173*9e39c5baSBill Taylor {
1174*9e39c5baSBill Taylor 	dapls_tavor_wrid_list_hdr_t	*wridlist;
1175*9e39c5baSBill Taylor 	dapls_tavor_wrid_entry_t	*wre_last;
1176*9e39c5baSBill Taylor 	ib_qp_handle_t			qp;
1177*9e39c5baSBill Taylor 	DAT_RETURN			status;
1178*9e39c5baSBill Taylor 	uint32_t			desc;
1179*9e39c5baSBill Taylor 	uint64_t			*wqe_addr;
1180*9e39c5baSBill Taylor 	uint32_t			desc_sz;
1181*9e39c5baSBill Taylor 	uint32_t			wqeaddrsz;
1182*9e39c5baSBill Taylor 	uint32_t			head, tail, next_tail, qsize_msk;
1183*9e39c5baSBill Taylor 
1184*9e39c5baSBill Taylor 	if (ep->qp_state == IBT_STATE_RESET) {
1185*9e39c5baSBill Taylor 		dapl_dbg_log(DAPL_DBG_TYPE_ERR,
1186*9e39c5baSBill Taylor 		    "post_recv: invalid qp_state %d\n", ep->qp_state);
1187*9e39c5baSBill Taylor 		return (DAT_INVALID_STATE);
1188*9e39c5baSBill Taylor 	}
1189*9e39c5baSBill Taylor 	qp = ep->qp_handle;
1190*9e39c5baSBill Taylor 
1191*9e39c5baSBill Taylor 	/* Grab the lock for the WRID list */
1192*9e39c5baSBill Taylor 	dapl_os_lock(&qp->qp_rq_wqhdr->wq_wrid_lock->wrl_lock);
1193*9e39c5baSBill Taylor 	wridlist  = qp->qp_rq_wqhdr->wq_wrid_post;
1194*9e39c5baSBill Taylor 
1195*9e39c5baSBill Taylor 	/* Save away some initial QP state */
1196*9e39c5baSBill Taylor 	qsize_msk = qp->qp_rq_wqhdr->wq_size - 1;
1197*9e39c5baSBill Taylor 	tail	  = qp->qp_rq_wqhdr->wq_tail;
1198*9e39c5baSBill Taylor 	head	  = qp->qp_rq_wqhdr->wq_head;
1199*9e39c5baSBill Taylor 
1200*9e39c5baSBill Taylor 	/*
1201*9e39c5baSBill Taylor 	 * For the ibt_recv_wr_t passed in, parse the request and build a
1202*9e39c5baSBill Taylor 	 * Recv WQE. Link the WQE with the previous WQE and ring the
1203*9e39c5baSBill Taylor 	 * doorbell.
1204*9e39c5baSBill Taylor 	 */
1205*9e39c5baSBill Taylor 
1206*9e39c5baSBill Taylor 	/*
1207*9e39c5baSBill Taylor 	 * Check for "queue full" condition.  If the queue is already full,
1208*9e39c5baSBill Taylor 	 * then no more WQEs can be posted. So return an error.
1209*9e39c5baSBill Taylor 	 */
1210*9e39c5baSBill Taylor 	if (qp->qp_rq_wqhdr->wq_full != 0) {
1211*9e39c5baSBill Taylor 		dapl_os_unlock(&qp->qp_rq_wqhdr->wq_wrid_lock->wrl_lock);
1212*9e39c5baSBill Taylor 		return (DAT_INSUFFICIENT_RESOURCES);
1213*9e39c5baSBill Taylor 	}
1214*9e39c5baSBill Taylor 
1215*9e39c5baSBill Taylor 	/*
1216*9e39c5baSBill Taylor 	 * Increment the "tail index" and check for "queue
1217*9e39c5baSBill Taylor 	 * full" condition.  If we detect that the current
1218*9e39c5baSBill Taylor 	 * work request is going to fill the work queue, then
1219*9e39c5baSBill Taylor 	 * we mark this condition and continue.
1220*9e39c5baSBill Taylor 	 */
1221*9e39c5baSBill Taylor 	next_tail = (tail + 1) & qsize_msk;
1222*9e39c5baSBill Taylor 	if (next_tail == head) {
1223*9e39c5baSBill Taylor 		qp->qp_rq_wqhdr->wq_full = 1;
1224*9e39c5baSBill Taylor 	}
1225*9e39c5baSBill Taylor 
1226*9e39c5baSBill Taylor 	/* Get the descriptor (IO Address) of the WQE to be built */
1227*9e39c5baSBill Taylor 	desc = TAVOR_QP_RQ_DESC(qp, tail);
1228*9e39c5baSBill Taylor 	/* The user virtual address of the WQE to be built */
1229*9e39c5baSBill Taylor 	wqe_addr = TAVOR_QP_RQ_ENTRY(qp, tail);
1230*9e39c5baSBill Taylor 
1231*9e39c5baSBill Taylor 	/*
1232*9e39c5baSBill Taylor 	 * Call dapli_arbel_wqe_recv_build() to build the WQE at the given
1233*9e39c5baSBill Taylor 	 * address. This routine uses the information in the
1234*9e39c5baSBill Taylor 	 * ibt_recv_wr_t and returns the size of the WQE.
1235*9e39c5baSBill Taylor 	 */
1236*9e39c5baSBill Taylor 	status = dapli_arbel_wqe_recv_build(qp, wr, wqe_addr, &desc_sz);
1237*9e39c5baSBill Taylor 	if (status != DAT_SUCCESS) {
1238*9e39c5baSBill Taylor 		dapl_os_unlock(&qp->qp_rq_wqhdr->wq_wrid_lock->wrl_lock);
1239*9e39c5baSBill Taylor 		return (DAT_INTERNAL_ERROR);
1240*9e39c5baSBill Taylor 	}
1241*9e39c5baSBill Taylor 
1242*9e39c5baSBill Taylor 	/*
1243*9e39c5baSBill Taylor 	 * Add a WRID entry to the WRID list.  Need to calculate the
1244*9e39c5baSBill Taylor 	 * "wqeaddr" and "signaled_dbd" values to pass to
1245*9e39c5baSBill Taylor 	 * dapli_tavor_wrid_add_entry().
1246*9e39c5baSBill Taylor 	 * Note: all Recv WQEs are essentially "signaled"
1247*9e39c5baSBill Taylor 	 */
1248*9e39c5baSBill Taylor 	wqeaddrsz = TAVOR_QP_WQEADDRSZ(desc, 0);
1249*9e39c5baSBill Taylor 	dapli_tavor_wrid_add_entry(qp->qp_rq_wqhdr, wr->wr_id, wqeaddrsz,
1250*9e39c5baSBill Taylor 	    (uint32_t)TAVOR_WRID_ENTRY_SIGNALED);
1251*9e39c5baSBill Taylor 
1252*9e39c5baSBill Taylor 	/*
1253*9e39c5baSBill Taylor 	 * Now if the WRID tail entry is non-NULL, then this
1254*9e39c5baSBill Taylor 	 * represents the entry to which we are chaining the
1255*9e39c5baSBill Taylor 	 * new entries.  Since we are going to ring the
1256*9e39c5baSBill Taylor 	 * doorbell for this WQE, we want to set its "dbd" bit.
1257*9e39c5baSBill Taylor 	 *
1258*9e39c5baSBill Taylor 	 * On the other hand, if the tail is NULL, even though
1259*9e39c5baSBill Taylor 	 * we will have rung the doorbell for the previous WQE
1260*9e39c5baSBill Taylor 	 * (for the hardware's sake) it is irrelevant to our
1261*9e39c5baSBill Taylor 	 * purposes (for tracking WRIDs) because we know the
1262*9e39c5baSBill Taylor 	 * request must have already completed.
1263*9e39c5baSBill Taylor 	 */
1264*9e39c5baSBill Taylor 	wre_last = wridlist->wl_wre_old_tail;
1265*9e39c5baSBill Taylor 	if (wre_last != NULL) {
1266*9e39c5baSBill Taylor 		wre_last->wr_signaled_dbd |= TAVOR_WRID_ENTRY_DOORBELLED;
1267*9e39c5baSBill Taylor 	}
1268*9e39c5baSBill Taylor 
1269*9e39c5baSBill Taylor 	/* Update some of the state in the QP */
1270*9e39c5baSBill Taylor 	qp->qp_rq_lastwqeaddr	 = wqe_addr;
1271*9e39c5baSBill Taylor 	qp->qp_rq_wqhdr->wq_tail = next_tail;
1272*9e39c5baSBill Taylor 
1273*9e39c5baSBill Taylor 	/* Update the doorbell record */
1274*9e39c5baSBill Taylor 	qp->qp_rq_counter++;
1275*9e39c5baSBill Taylor 	(qp->qp_rq_dbp)[0] = HTOBE_32(qp->qp_rq_counter);
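	/*
	 * Unlike the send path, no UAR doorbell register is written here;
	 * the receive side only advances the posted-WQE count stored
	 * (big-endian) in the first word of qp_rq_dbp.
	 */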
1276*9e39c5baSBill Taylor 
1277*9e39c5baSBill Taylor 	dapl_os_unlock(&qp->qp_rq_wqhdr->wq_wrid_lock->wrl_lock);
1278*9e39c5baSBill Taylor 
1279*9e39c5baSBill Taylor 	return (DAT_SUCCESS);
1280*9e39c5baSBill Taylor }
1281*9e39c5baSBill Taylor 
1282*9e39c5baSBill Taylor /*
1283*9e39c5baSBill Taylor  * dapli_arbel_post_srq()
1284*9e39c5baSBill Taylor  */
1285*9e39c5baSBill Taylor /* ARGSUSED */
1286*9e39c5baSBill Taylor static DAT_RETURN
1287*9e39c5baSBill Taylor dapli_arbel_post_srq(DAPL_SRQ *srqp, ibt_recv_wr_t *wr, boolean_t ns)
1288*9e39c5baSBill Taylor {
1289*9e39c5baSBill Taylor 	ib_srq_handle_t			srq;
1290*9e39c5baSBill Taylor 	DAT_RETURN			status;
1291*9e39c5baSBill Taylor 	uint32_t			desc;
1292*9e39c5baSBill Taylor 	uint64_t			*wqe_addr;
1293*9e39c5baSBill Taylor 	uint32_t			head, next_head, qsize_msk;
1294*9e39c5baSBill Taylor 	uint32_t			wqe_index;
1295*9e39c5baSBill Taylor 
1296*9e39c5baSBill Taylor 
1297*9e39c5baSBill Taylor 	srq = srqp->srq_handle;
1298*9e39c5baSBill Taylor 
1299*9e39c5baSBill Taylor 	/* Grab the lock for the WRID list */
1300*9e39c5baSBill Taylor 	dapl_os_lock(&srq->srq_wridlist->wl_lock->wrl_lock);
1301*9e39c5baSBill Taylor 
1302*9e39c5baSBill Taylor 	/*
1303*9e39c5baSBill Taylor 	 * For the ibt_recv_wr_t passed in, parse the request and build a
1304*9e39c5baSBill Taylor 	 * Recv WQE. Link the WQE with the previous WQE and ring the
1305*9e39c5baSBill Taylor 	 * doorbell.
1306*9e39c5baSBill Taylor 	 */
1307*9e39c5baSBill Taylor 
1308*9e39c5baSBill Taylor 	/*
1309*9e39c5baSBill Taylor 	 * Check for "queue full" condition.  If the queue is already full,
1310*9e39c5baSBill Taylor 	 * i.e. there are no free entries, then no more WQEs can be posted.
1311*9e39c5baSBill Taylor 	 * So return an error.
1312*9e39c5baSBill Taylor 	 */
1313*9e39c5baSBill Taylor 	if (srq->srq_wridlist->wl_freel_entries == 0) {
1314*9e39c5baSBill Taylor 		dapl_os_unlock(&srq->srq_wridlist->wl_lock->wrl_lock);
1315*9e39c5baSBill Taylor 		return (DAT_INSUFFICIENT_RESOURCES);
1316*9e39c5baSBill Taylor 	}
1317*9e39c5baSBill Taylor 
1318*9e39c5baSBill Taylor 	/* Save away some initial SRQ state */
1319*9e39c5baSBill Taylor 	qsize_msk = srq->srq_wridlist->wl_size - 1;
1320*9e39c5baSBill Taylor 	head	  = srq->srq_wridlist->wl_freel_head;
1321*9e39c5baSBill Taylor 
1322*9e39c5baSBill Taylor 	next_head = (head + 1) & qsize_msk;
1323*9e39c5baSBill Taylor 
1324*9e39c5baSBill Taylor 	/* Get the descriptor (IO Address) of the WQE to be built */
1325*9e39c5baSBill Taylor 	desc = srq->srq_wridlist->wl_free_list[head];
1326*9e39c5baSBill Taylor 
1327*9e39c5baSBill Taylor 	wqe_index = TAVOR_SRQ_WQ_INDEX(srq->srq_wq_desc_addr, desc,
1328*9e39c5baSBill Taylor 	    srq->srq_wq_wqesz);
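	/*
	 * TAVOR_SRQ_WQ_INDEX converts the descriptor's IO address back
	 * into a WQE index relative to the start of the SRQ work queue,
	 * using the queue's base IO address and the per-WQE stride.
	 */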
1329*9e39c5baSBill Taylor 
1330*9e39c5baSBill Taylor 	/* The user virtual address of the WQE to be built */
1331*9e39c5baSBill Taylor 	wqe_addr = TAVOR_SRQ_WQ_ENTRY(srq, wqe_index);
1332*9e39c5baSBill Taylor 
1333*9e39c5baSBill Taylor 	/*
1334*9e39c5baSBill Taylor 	 * Call dapli_arbel_wqe_srq_build() to build the WQE at the given
1335*9e39c5baSBill Taylor 	 * address. This routine uses the information in the
1336*9e39c5baSBill Taylor 	 * ibt_recv_wr_t and returns the size of the WQE.
1337*9e39c5baSBill Taylor 	 */
1338*9e39c5baSBill Taylor 	status = dapli_arbel_wqe_srq_build(srq, wr, wqe_addr);
1339*9e39c5baSBill Taylor 	if (status != DAT_SUCCESS) {
1340*9e39c5baSBill Taylor 		dapl_os_unlock(&srq->srq_wridlist->wl_lock->wrl_lock);
1341*9e39c5baSBill Taylor 		return (status);
1342*9e39c5baSBill Taylor 	}
1343*9e39c5baSBill Taylor 
1344*9e39c5baSBill Taylor 	/*
1345*9e39c5baSBill Taylor 	 * Add a WRID entry to the WRID list.
1346*9e39c5baSBill Taylor 	 */
1347*9e39c5baSBill Taylor 	dapli_tavor_wrid_add_entry_srq(srq, wr->wr_id, wqe_index);
1348*9e39c5baSBill Taylor 
1349*9e39c5baSBill Taylor #if 0
1350*9e39c5baSBill Taylor 	if (srq->srq_wq_lastwqeindex == -1) {
1351*9e39c5baSBill Taylor 		last_wqe_addr = NULL;
1352*9e39c5baSBill Taylor 	} else {
1353*9e39c5baSBill Taylor 		last_wqe_addr = TAVOR_SRQ_WQ_ENTRY(srq,
1354*9e39c5baSBill Taylor 		    srq->srq_wq_lastwqeindex);
1355*9e39c5baSBill Taylor 	}
1356*9e39c5baSBill Taylor 	/*
1357*9e39c5baSBill Taylor 	 * Now link the chain to the old chain (if there was one)
1358*9e39c5baSBill Taylor 	 * and update the wqe_counter in the doorbell record.
1359*9e39c5baSBill Taylor 	 */
1360*9e39c5baSBill Taylor XXX
1361*9e39c5baSBill Taylor 	dapli_tavor_wqe_srq_linknext(wqe_addr, ns, desc, last_wqe_addr);
1362*9e39c5baSBill Taylor #endif
1363*9e39c5baSBill Taylor 
1364*9e39c5baSBill Taylor 	/* Update some of the state in the SRQ */
1365*9e39c5baSBill Taylor 	srq->srq_wq_lastwqeindex	 = wqe_index;
1366*9e39c5baSBill Taylor 	srq->srq_wridlist->wl_freel_head = next_head;
1367*9e39c5baSBill Taylor 	srq->srq_wridlist->wl_freel_entries--;
1368*9e39c5baSBill Taylor 	dapl_os_assert(srq->srq_wridlist->wl_freel_entries <=
1369*9e39c5baSBill Taylor 	    srq->srq_wridlist->wl_size);
1370*9e39c5baSBill Taylor 
1371*9e39c5baSBill Taylor 	/* Update the doorbell record */
1372*9e39c5baSBill Taylor 	srq->srq_counter++;
1373*9e39c5baSBill Taylor 	(srq->srq_dbp)[0] = HTOBE_32(srq->srq_counter);
1374*9e39c5baSBill Taylor 
1375*9e39c5baSBill Taylor 	dapl_os_unlock(&srq->srq_wridlist->wl_lock->wrl_lock);
1376*9e39c5baSBill Taylor 
1377*9e39c5baSBill Taylor 	return (DAT_SUCCESS);
1378*9e39c5baSBill Taylor }
1379*9e39c5baSBill Taylor 
1380*9e39c5baSBill Taylor /*
1381*9e39c5baSBill Taylor  * dapli_arbel_cq_srq_entries_flush()
1382*9e39c5baSBill Taylor  */
1383*9e39c5baSBill Taylor static void
1384*9e39c5baSBill Taylor dapli_arbel_cq_srq_entries_flush(ib_qp_handle_t qp)
1385*9e39c5baSBill Taylor {
1386*9e39c5baSBill Taylor 	ib_cq_handle_t		cq;
1387*9e39c5baSBill Taylor 	dapls_tavor_workq_hdr_t	*wqhdr;
1388*9e39c5baSBill Taylor 	tavor_hw_cqe_t		*cqe;
1389*9e39c5baSBill Taylor 	tavor_hw_cqe_t		*next_cqe;
1390*9e39c5baSBill Taylor 	uint32_t		cons_indx, tail_cons_indx, wrap_around_mask;
1391*9e39c5baSBill Taylor 	uint32_t		new_indx, check_indx, indx;
1392*9e39c5baSBill Taylor 	int			cqe_qpnum, cqe_type;
1393*9e39c5baSBill Taylor 	int			outstanding_cqes, removed_cqes;
1394*9e39c5baSBill Taylor 	int			i;
1395*9e39c5baSBill Taylor 
1396*9e39c5baSBill Taylor 	/* ASSERT(MUTEX_HELD(&qp->qp_rq_cqhdl->cq_lock)); */
1397*9e39c5baSBill Taylor 
1398*9e39c5baSBill Taylor 	cq = qp->qp_rq_cqhdl;
1399*9e39c5baSBill Taylor 	wqhdr = qp->qp_rq_wqhdr;
1400*9e39c5baSBill Taylor 
1401*9e39c5baSBill Taylor 	dapl_os_assert(wqhdr->wq_wrid_post != NULL);
1402*9e39c5baSBill Taylor 	dapl_os_assert(wqhdr->wq_wrid_post->wl_srq_en != 0);
1403*9e39c5baSBill Taylor 
1404*9e39c5baSBill Taylor 	/* Get the consumer index */
1405*9e39c5baSBill Taylor 	cons_indx = cq->cq_consindx;
1406*9e39c5baSBill Taylor 
1407*9e39c5baSBill Taylor 	/*
1408*9e39c5baSBill Taylor 	 * Calculate the wrap around mask.  Note: This operation only works
1409*9e39c5baSBill Taylor 	 * because all Tavor completion queues have power-of-2 sizes
1410*9e39c5baSBill Taylor 	 */
1411*9e39c5baSBill Taylor 	wrap_around_mask = (cq->cq_size - 1);
1412*9e39c5baSBill Taylor 
1413*9e39c5baSBill Taylor 	/* Calculate the pointer to the first CQ entry */
1414*9e39c5baSBill Taylor 	cqe = &cq->cq_addr[cons_indx];
1415*9e39c5baSBill Taylor 
1416*9e39c5baSBill Taylor 	/*
1417*9e39c5baSBill Taylor 	 * Loop through the CQ looking for entries owned by software.  If an
1418*9e39c5baSBill Taylor 	 * entry is owned by software then we increment an 'outstanding_cqes'
1419*9e39c5baSBill Taylor 	 * count to know how many entries total we have on our CQ.  We use this
1420*9e39c5baSBill Taylor 	 * value further down to know how many entries to loop through looking
1421*9e39c5baSBill Taylor 	 * for our same QP number.
1422*9e39c5baSBill Taylor 	 */
1423*9e39c5baSBill Taylor 	outstanding_cqes = 0;
1424*9e39c5baSBill Taylor 	tail_cons_indx = cons_indx;
1425*9e39c5baSBill Taylor 	while (TAVOR_CQE_OWNER_IS_SW(cqe)) {
1426*9e39c5baSBill Taylor 		/* increment total cqes count */
1427*9e39c5baSBill Taylor 		outstanding_cqes++;
1428*9e39c5baSBill Taylor 
1429*9e39c5baSBill Taylor 		/* increment the consumer index */
1430*9e39c5baSBill Taylor 		tail_cons_indx = (tail_cons_indx + 1) & wrap_around_mask;
1431*9e39c5baSBill Taylor 
1432*9e39c5baSBill Taylor 		/* update the pointer to the next cq entry */
1433*9e39c5baSBill Taylor 		cqe = &cq->cq_addr[tail_cons_indx];
1434*9e39c5baSBill Taylor 	}
1435*9e39c5baSBill Taylor 
1436*9e39c5baSBill Taylor 	/*
1437*9e39c5baSBill Taylor 	 * Using the 'tail_cons_indx' that was just set, we now know how
1438*9e39c5baSBill Taylor 	 * many CQEs there are in total.  Set both 'check_indx' and
1439*9e39c5baSBill Taylor 	 * 'new_indx' to the last entry identified by 'tail_cons_indx'
1440*9e39c5baSBill Taylor 	 */
1441*9e39c5baSBill Taylor 	check_indx = new_indx = (tail_cons_indx - 1) & wrap_around_mask;
1442*9e39c5baSBill Taylor 
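	/*
	 * Walk backward from the newest software-owned CQE toward the
	 * original consumer index.  Receive CQEs belonging to this QP are
	 * returned to the SRQ free list; every other CQE is copied toward
	 * 'new_indx' so it stays visible to the normal polling path.
	 */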
1443*9e39c5baSBill Taylor 	for (i = 0; i < outstanding_cqes; i++) {
1444*9e39c5baSBill Taylor 		cqe = &cq->cq_addr[check_indx];
1445*9e39c5baSBill Taylor 
1446*9e39c5baSBill Taylor 		/* Grab QP number from CQE */
1447*9e39c5baSBill Taylor 		cqe_qpnum = TAVOR_CQE_QPNUM_GET(cqe);
1448*9e39c5baSBill Taylor 		cqe_type = TAVOR_CQE_SENDRECV_GET(cqe);
1449*9e39c5baSBill Taylor 
1450*9e39c5baSBill Taylor 		/*
1451*9e39c5baSBill Taylor 		 * If the QP number is the same in the CQE as the QP that we
1452*9e39c5baSBill Taylor 		 * have on this SRQ, then we must free up the entry off the
1453*9e39c5baSBill Taylor 		 * SRQ.  We also make sure that the completion type is of the
1454*9e39c5baSBill Taylor 		 * 'TAVOR_COMPLETION_RECV' type.  So any send completions on
1455*9e39c5baSBill Taylor 		 * this CQ will be left as-is.  The handling of returning
1456*9e39c5baSBill Taylor 		 * entries back to HW ownership happens further down.
1457*9e39c5baSBill Taylor 		 */
1458*9e39c5baSBill Taylor 		if (cqe_qpnum == qp->qp_num &&
1459*9e39c5baSBill Taylor 		    cqe_type == TAVOR_COMPLETION_RECV) {
1460*9e39c5baSBill Taylor 			/* Add back to SRQ free list */
1461*9e39c5baSBill Taylor 			(void) dapli_tavor_wrid_find_match_srq(
1462*9e39c5baSBill Taylor 			    wqhdr->wq_wrid_post, cqe);
1463*9e39c5baSBill Taylor 		} else {
1464*9e39c5baSBill Taylor 			/* Do Copy */
1465*9e39c5baSBill Taylor 			if (check_indx != new_indx) {
1466*9e39c5baSBill Taylor 				next_cqe = &cq->cq_addr[new_indx];
1467*9e39c5baSBill Taylor 				/*
1468*9e39c5baSBill Taylor 				 * Copy the CQE into the "next_cqe"
1469*9e39c5baSBill Taylor 				 * pointer.
1470*9e39c5baSBill Taylor 				 */
1471*9e39c5baSBill Taylor 				(void) dapl_os_memcpy(next_cqe, cqe,
1472*9e39c5baSBill Taylor 				    sizeof (tavor_hw_cqe_t));
1473*9e39c5baSBill Taylor 			}
1474*9e39c5baSBill Taylor 			new_indx = (new_indx - 1) & wrap_around_mask;
1475*9e39c5baSBill Taylor 		}
1476*9e39c5baSBill Taylor 		/* Move index to next CQE to check */
1477*9e39c5baSBill Taylor 		check_indx = (check_indx - 1) & wrap_around_mask;
1478*9e39c5baSBill Taylor 	}
1479*9e39c5baSBill Taylor 
1480*9e39c5baSBill Taylor 	/* Initialize removed cqes count */
1481*9e39c5baSBill Taylor 	removed_cqes = 0;
1482*9e39c5baSBill Taylor 
1483*9e39c5baSBill Taylor 	/* If an entry was removed */
1484*9e39c5baSBill Taylor 	if (check_indx != new_indx) {
1485*9e39c5baSBill Taylor 
1486*9e39c5baSBill Taylor 		/*
1487*9e39c5baSBill Taylor 		 * Set current pointer back to the beginning consumer index.
1488*9e39c5baSBill Taylor 		 * At this point, all unclaimed entries have been copied to the
1489*9e39c5baSBill Taylor 		 * index specified by 'new_indx'.  This 'new_indx' will be used
1490*9e39c5baSBill Taylor 		 * as the new consumer index after we mark all freed entries as
1491*9e39c5baSBill Taylor 		 * having HW ownership.  We do that here.
1492*9e39c5baSBill Taylor 		 */
1493*9e39c5baSBill Taylor 
1494*9e39c5baSBill Taylor 		/* Loop through all entries until we reach our new pointer */
1495*9e39c5baSBill Taylor 		for (indx = cons_indx; indx <= new_indx;
1496*9e39c5baSBill Taylor 		    indx = (indx + 1) & wrap_around_mask) {
1497*9e39c5baSBill Taylor 			removed_cqes++;
1498*9e39c5baSBill Taylor 			cqe = &cq->cq_addr[indx];
1499*9e39c5baSBill Taylor 
1500*9e39c5baSBill Taylor 			/* Reset entry to hardware ownership */
1501*9e39c5baSBill Taylor 			TAVOR_CQE_OWNER_SET_HW(cqe);
1502*9e39c5baSBill Taylor 		}
1503*9e39c5baSBill Taylor 	}
1504*9e39c5baSBill Taylor 
1505*9e39c5baSBill Taylor 	/*
1506*9e39c5baSBill Taylor 	 * Update consumer index to be the 'new_indx'.  This moves it past all
1507*9e39c5baSBill Taylor 	 * removed entries.  Because 'new_indx' is pointing to the last
1508*9e39c5baSBill Taylor 	 * previously valid SW owned entry, we add 1 to point the cons_indx to
1509*9e39c5baSBill Taylor 	 * the first HW owned entry.
1510*9e39c5baSBill Taylor 	 */
1511*9e39c5baSBill Taylor 	cons_indx = (new_indx + 1) & wrap_around_mask;
1512*9e39c5baSBill Taylor 
1513*9e39c5baSBill Taylor 	/*
1514*9e39c5baSBill Taylor 	 * Now we only ring the doorbell (to update the consumer index) if
1515*9e39c5baSBill Taylor 	 * we've actually consumed a CQ entry.  If we found no QP number
1516*9e39c5baSBill Taylor 	 * matches above, then we would not have removed anything.  So only if
1517*9e39c5baSBill Taylor 	 * something was removed do we ring the doorbell.
1518*9e39c5baSBill Taylor 	 */
1519*9e39c5baSBill Taylor 	if ((removed_cqes != 0) && (cq->cq_consindx != cons_indx)) {
1520*9e39c5baSBill Taylor 		/*
1521*9e39c5baSBill Taylor 		 * Update the consumer index in both the CQ handle and the
1522*9e39c5baSBill Taylor 		 * doorbell record.
1523*9e39c5baSBill Taylor 		 */
1524*9e39c5baSBill Taylor 		cq->cq_consindx = cons_indx;
1525*9e39c5baSBill Taylor 		dapli_arbel_cq_update_ci(cq, cq->cq_poll_dbp);
1526*9e39c5baSBill Taylor 	}
1527*9e39c5baSBill Taylor }
1528*9e39c5baSBill Taylor 
1529*9e39c5baSBill Taylor static void
1530*9e39c5baSBill Taylor dapli_arbel_rq_prelink(caddr_t first, uint32_t desc_off, uint32_t wqesz,
1531*9e39c5baSBill Taylor     uint32_t numwqe, uint32_t nds)
1532*9e39c5baSBill Taylor {
1533*9e39c5baSBill Taylor 	int i;
1534*9e39c5baSBill Taylor 	uint32_t *p = (uint32_t *)(uintptr_t)first;
1535*9e39c5baSBill Taylor 	uint32_t off = desc_off;
1536*9e39c5baSBill Taylor 	uint32_t pincr = wqesz / sizeof (uint32_t);
1537*9e39c5baSBill Taylor 	ibt_wr_ds_t sgl;
1538*9e39c5baSBill Taylor 
1539*9e39c5baSBill Taylor 	sgl.ds_va = (ib_vaddr_t)0;
1540*9e39c5baSBill Taylor 	sgl.ds_key = ARBEL_WQE_SGL_INVALID_LKEY;
1541*9e39c5baSBill Taylor 	sgl.ds_len = (ib_msglen_t)0;
1542*9e39c5baSBill Taylor 
1543*9e39c5baSBill Taylor 	for (i = 0; i < numwqe - 1; i++, p += pincr) {
1544*9e39c5baSBill Taylor 		off += wqesz;
1545*9e39c5baSBill Taylor 		p[0] = HTOBE_32(off);	/* link curr to next */
1546*9e39c5baSBill Taylor 		p[1] = nds;		/* nds is 0 for SRQ */
1547*9e39c5baSBill Taylor 		TAVOR_WQE_BUILD_DATA_SEG((void *)&p[2], &sgl);
1548*9e39c5baSBill Taylor 	}
1549*9e39c5baSBill Taylor 	p[0] = HTOBE_32(desc_off); /* link last to first */
1550*9e39c5baSBill Taylor 	p[1] = nds;
1551*9e39c5baSBill Taylor 	TAVOR_WQE_BUILD_DATA_SEG((void *)&p[2], &sgl);
1552*9e39c5baSBill Taylor }
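/*
 * Illustrative example of the pre-linked ring built above (hypothetical
 * numbers): with desc_off = 0x1000, wqesz = 0x40 and numwqe = 4, the
 * first words of the four WQEs become 0x1040, 0x1080, 0x10c0 and 0x1000
 * (big-endian), i.e. each WQE points to the next and the last points
 * back to the first.  Each WQE is also seeded with a single data segment
 * whose lkey is ARBEL_WQE_SGL_INVALID_LKEY, marking it as carrying no
 * real scatter data.
 */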
1553*9e39c5baSBill Taylor 
1554*9e39c5baSBill Taylor static void
1555*9e39c5baSBill Taylor dapli_arbel_sq_prelink(caddr_t first, uint32_t desc_off, uint32_t wqesz,
1556*9e39c5baSBill Taylor     uint32_t numwqe)
1557*9e39c5baSBill Taylor {
1558*9e39c5baSBill Taylor 	int i;
1559*9e39c5baSBill Taylor 	uint32_t *p = (uint32_t *)(uintptr_t)first;
1560*9e39c5baSBill Taylor 	uint32_t off = desc_off;
1561*9e39c5baSBill Taylor 	uint32_t pincr = wqesz / sizeof (uint32_t);
1562*9e39c5baSBill Taylor 
1563*9e39c5baSBill Taylor 	for (i = 0; i < numwqe - 1; i++, p += pincr) {
1564*9e39c5baSBill Taylor 		off += wqesz;
1565*9e39c5baSBill Taylor 		p[0] = HTOBE_32(off);	/* link curr to next */
1566*9e39c5baSBill Taylor 	}
1567*9e39c5baSBill Taylor 	p[0] = HTOBE_32(desc_off); /* link last to first */
1568*9e39c5baSBill Taylor }
1569*9e39c5baSBill Taylor 
1570*9e39c5baSBill Taylor static void
1571*9e39c5baSBill Taylor dapli_arbel_qp_init(ib_qp_handle_t qp)
1572*9e39c5baSBill Taylor {
1573*9e39c5baSBill Taylor 	(qp->qp_sq_dbp)[1] = HTOBE_32((qp->qp_num << 8) | ARBEL_DBR_SQ);
1574*9e39c5baSBill Taylor 	if (qp->qp_srq_enabled == 0) {
1575*9e39c5baSBill Taylor 		(qp->qp_rq_dbp)[1] = HTOBE_32((qp->qp_num << 8) | ARBEL_DBR_RQ);
1576*9e39c5baSBill Taylor 
1577*9e39c5baSBill Taylor 		/* pre-link the whole receive queue */
1578*9e39c5baSBill Taylor 		dapli_arbel_rq_prelink(qp->qp_rq_buf, qp->qp_rq_desc_addr,
1579*9e39c5baSBill Taylor 		    qp->qp_rq_wqesz, qp->qp_rq_numwqe,
1580*9e39c5baSBill Taylor 		    HTOBE_32(qp->qp_rq_wqesz >> 4));
1581*9e39c5baSBill Taylor 	}
1582*9e39c5baSBill Taylor 	dapli_arbel_sq_prelink(qp->qp_sq_buf, qp->qp_sq_desc_addr,
1583*9e39c5baSBill Taylor 	    qp->qp_sq_wqesz, qp->qp_sq_numwqe);
1584*9e39c5baSBill Taylor 	qp->qp_sq_lastwqeaddr = (uint64_t *)((uintptr_t)qp->qp_sq_buf +
1585*9e39c5baSBill Taylor 	    ((qp->qp_sq_numwqe - 1) * qp->qp_sq_wqesz));
1586*9e39c5baSBill Taylor 	qp->qp_rq_counter = 0;
1587*9e39c5baSBill Taylor 	qp->qp_sq_counter = 0;
1588*9e39c5baSBill Taylor }
1589*9e39c5baSBill Taylor 
1590*9e39c5baSBill Taylor static void
1591*9e39c5baSBill Taylor dapli_arbel_cq_init(ib_cq_handle_t cq)
1592*9e39c5baSBill Taylor {
1593*9e39c5baSBill Taylor 	(cq->cq_poll_dbp)[1] =
1594*9e39c5baSBill Taylor 	    HTOBE_32((cq->cq_num << 8) | ARBEL_DBR_CQ_SET_CI);
1595*9e39c5baSBill Taylor 	(cq->cq_arm_dbp)[1] =
1596*9e39c5baSBill Taylor 	    HTOBE_32((cq->cq_num << 8) | ARBEL_DBR_CQ_ARM | 0x8);
1597*9e39c5baSBill Taylor 	/* cq_resize -- needs testing */
1598*9e39c5baSBill Taylor }
1599*9e39c5baSBill Taylor 
1600*9e39c5baSBill Taylor static void
1601*9e39c5baSBill Taylor dapli_arbel_srq_init(ib_srq_handle_t srq)
1602*9e39c5baSBill Taylor {
1603*9e39c5baSBill Taylor 	(srq->srq_dbp)[1] =
1604*9e39c5baSBill Taylor 	    HTOBE_32((srq->srq_num << 8) | ARBEL_DBR_SRQ);
1605*9e39c5baSBill Taylor 
1606*9e39c5baSBill Taylor 	/* pre-link the whole shared receive queue */
1607*9e39c5baSBill Taylor 	dapli_arbel_rq_prelink(srq->srq_addr, srq->srq_wq_desc_addr,
1608*9e39c5baSBill Taylor 	    srq->srq_wq_wqesz, srq->srq_wq_numwqe, 0);
1609*9e39c5baSBill Taylor 	srq->srq_counter = 0;
1610*9e39c5baSBill Taylor 
1611*9e39c5baSBill Taylor 	/* needs testing */
1612*9e39c5baSBill Taylor }
1613*9e39c5baSBill Taylor 
1614*9e39c5baSBill Taylor void
1615*9e39c5baSBill Taylor dapls_init_funcs_arbel(DAPL_HCA *hca_ptr)
1616*9e39c5baSBill Taylor {
1617*9e39c5baSBill Taylor 	hca_ptr->post_send = dapli_arbel_post_send;
1618*9e39c5baSBill Taylor 	hca_ptr->post_recv = dapli_arbel_post_recv;
1619*9e39c5baSBill Taylor 	hca_ptr->post_srq = dapli_arbel_post_srq;
1620*9e39c5baSBill Taylor 	hca_ptr->cq_peek = dapli_arbel_cq_peek;
1621*9e39c5baSBill Taylor 	hca_ptr->cq_poll = dapli_arbel_cq_poll;
1622*9e39c5baSBill Taylor 	hca_ptr->cq_poll_one = dapli_arbel_cq_poll_one;
1623*9e39c5baSBill Taylor 	hca_ptr->cq_notify = dapli_arbel_cq_notify;
1624*9e39c5baSBill Taylor 	hca_ptr->srq_flush = dapli_arbel_cq_srq_entries_flush;
1625*9e39c5baSBill Taylor 	hca_ptr->qp_init = dapli_arbel_qp_init;
1626*9e39c5baSBill Taylor 	hca_ptr->cq_init = dapli_arbel_cq_init;
1627*9e39c5baSBill Taylor 	hca_ptr->srq_init = dapli_arbel_srq_init;
1628*9e39c5baSBill Taylor 	hca_ptr->hermon_resize_cq = 0;
1629*9e39c5baSBill Taylor }
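/*
 * After dapls_init_funcs_arbel() has run, the generic uDAPL code reaches
 * the Arbel-specific paths only through these function pointers; for
 * example, a (hypothetical) caller would post a send as
 * (*hca_ptr->post_send)(ep, wr, B_TRUE) rather than naming a
 * dapli_arbel_* routine directly.
 */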