1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1 (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1,  (the "License").
26 
27 * You may not use this file except in compliance with the License.
28 
29 * You can obtain a copy of the License at available
30 * at http://opensource.org/licenses/CDDL-1.0
31 
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35 
36 #ifndef __QED_HSI_RDMA__
37 #define __QED_HSI_RDMA__
38 /************************************************************************/
/* Common RDMA definitions, shared by the eCore and protocol RDMA drivers  */
40 /************************************************************************/
41 #include "rdma_common.h"
42 
43 /*
44  * rdma completion notification queue element
45  */
46 struct rdma_cnqe
47 {
48 	struct regpair cq_handle;
49 };
50 
51 
52 struct rdma_cqe_responder
53 {
54 	struct regpair srq_wr_id;
55 	struct regpair qp_handle;
56 	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
57 	__le32 length;
	__le32 imm_data_hi /* High bytes of the immediate data, valid when imm_flg is set (iWARP only) */;
59 	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
60 	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
62 #define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
63 #define RDMA_CQE_RESPONDER_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
64 #define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
65 #define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1 /* r_key invalidated indicator */
66 #define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
67 #define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1 /* immediate data indicator */
68 #define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
69 #define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
70 #define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
71 #define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
72 #define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
73 	u8 status;
74 };
75 
76 struct rdma_cqe_requester
77 {
78 	__le16 sq_cons;
79 	__le16 reserved0;
80 	__le32 reserved1;
81 	struct regpair qp_handle;
82 	struct regpair reserved2;
83 	__le32 reserved3;
84 	__le16 reserved4;
85 	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
87 #define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
88 #define RDMA_CQE_REQUESTER_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
89 #define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
90 #define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
91 #define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
92 	u8 status;
93 };
94 
95 struct rdma_cqe_common
96 {
97 	struct regpair reserved0;
98 	struct regpair qp_handle;
99 	__le16 reserved1[7];
100 	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggles this bit each time it finishes producing all PBL entries */
102 #define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
103 #define RDMA_CQE_COMMON_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
104 #define RDMA_CQE_COMMON_TYPE_SHIFT       1
105 #define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
106 #define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
107 	u8 status;
108 };
109 
110 /*
111  * rdma completion queue element
112  */
113 union rdma_cqe
114 {
115 	struct rdma_cqe_responder resp;
116 	struct rdma_cqe_requester req;
117 	struct rdma_cqe_common cmn;
118 };
119 
120 
121 
122 
123 /*
124  * CQE requester status enumeration
125  */
126 enum rdma_cqe_requester_status_enum
127 {
128 	RDMA_CQE_REQ_STS_OK,
129 	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
130 	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
131 	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
132 	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
133 	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
134 	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
135 	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
136 	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
137 	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
138 	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
139 	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
140 	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
141 };
142 
143 
144 
145 /*
146  * CQE responder status enumeration
147  */
148 enum rdma_cqe_responder_status_enum
149 {
150 	RDMA_CQE_RESP_STS_OK,
151 	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
152 	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
153 	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
154 	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
155 	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
156 	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
157 	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
158 	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
159 };
160 
161 
162 /*
163  * CQE type enumeration
164  */
165 enum rdma_cqe_type
166 {
167 	RDMA_CQE_TYPE_REQUESTER,
168 	RDMA_CQE_TYPE_RESPONDER_RQ,
169 	RDMA_CQE_TYPE_RESPONDER_SRQ,
170 	RDMA_CQE_TYPE_INVALID,
171 	MAX_RDMA_CQE_TYPE
172 };
173 
174 
175 /*
176  * DIF Block size options
177  */
178 enum rdma_dif_block_size
179 {
180 	RDMA_DIF_BLOCK_512=0,
181 	RDMA_DIF_BLOCK_4096=1,
182 	MAX_RDMA_DIF_BLOCK_SIZE
183 };
184 
185 
186 /*
187  * DIF CRC initial value
188  */
189 enum rdma_dif_crc_seed
190 {
191 	RDMA_DIF_CRC_SEED_0000=0,
192 	RDMA_DIF_CRC_SEED_FFFF=1,
193 	MAX_RDMA_DIF_CRC_SEED
194 };
195 
196 
197 /*
198  * RDMA DIF Error Result Structure
199  */
200 struct rdma_dif_error_result
201 {
202 	__le32 error_intervals /* Total number of error intervals in the IO. */;
	__le32 dif_error_1st_interval /* Number of the first interval that contained an error. Set to 0xFFFFFFFF if the error occurred in the Runt Block. */;
204 	u8 flags;
205 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1 /* CRC error occurred. */
206 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
207 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1 /* App Tag error occurred. */
208 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
209 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1 /* Ref Tag error occurred. */
210 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
211 #define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
212 #define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1 /* Indicates the structure is valid. Toggles each time a region invalidate is performed. */
214 #define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
215 	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
216 };
217 
218 
219 /*
220  * DIF IO direction
221  */
222 enum rdma_dif_io_direction_flg
223 {
224 	RDMA_DIF_DIR_RX=0,
225 	RDMA_DIF_DIR_TX=1,
226 	MAX_RDMA_DIF_IO_DIRECTION_FLG
227 };
228 
229 
230 /*
231  * RDMA DIF Runt Result Structure
232  */
233 struct rdma_dif_runt_result
234 {
235 	__le16 guard_tag /* CRC result of received IO. */;
236 	__le16 reserved[3];
237 };
238 
239 
240 /*
241  * memory window type enumeration
242  */
243 enum rdma_mw_type
244 {
245 	RDMA_MW_TYPE_1,
246 	RDMA_MW_TYPE_2A,
247 	MAX_RDMA_MW_TYPE
248 };
249 
250 
251 struct rdma_rq_sge
252 {
253 	struct regpair addr;
254 	__le32 length;
255 	__le32 flags;
256 #define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF /* key of memory relating to this RQ */
257 #define RDMA_RQ_SGE_L_KEY_SHIFT     0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7 /* In the first SGE: the number of SGEs in this RQ WQE. In all other SGEs: must be set to 0 */
259 #define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
260 #define RDMA_RQ_SGE_RESERVED0_MASK  0x7
261 #define RDMA_RQ_SGE_RESERVED0_SHIFT 29
262 };
263 
264 
265 struct rdma_sq_atomic_wqe
266 {
267 	__le32 reserved1;
268 	__le32 length /* Total data length (8 bytes for Atomic) */;
269 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
270 	u8 req_type /* Type of WQE */;
271 	u8 flags;
272 #define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
273 #define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1 /* Don't care for atomic wqe */
279 #define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
280 #define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for atomic wqe */
281 #define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
282 #define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for atomic wqe */
283 #define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
284 #define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
285 #define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
286 	u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
287 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
288 	struct regpair remote_va /* remote virtual address */;
289 	__le32 r_key /* Remote key */;
290 	__le32 reserved2;
291 	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
292 	struct regpair swap_data /* Swap or add data */;
293 };
294 
295 
296 /*
297  * First element (16 bytes) of atomic wqe
298  */
299 struct rdma_sq_atomic_wqe_1st
300 {
301 	__le32 reserved1;
302 	__le32 length /* Total data length (8 bytes for Atomic) */;
303 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
304 	u8 req_type /* Type of WQE */;
305 	u8 flags;
306 #define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
307 #define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for atomic wqe */
313 #define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
314 #define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for atomic wqe */
315 #define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
316 #define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
317 #define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
318 	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
319 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
320 };
321 
322 
323 /*
324  * Second element (16 bytes) of atomic wqe
325  */
326 struct rdma_sq_atomic_wqe_2nd
327 {
328 	struct regpair remote_va /* remote virtual address */;
329 	__le32 r_key /* Remote key */;
330 	__le32 reserved2;
331 };
332 
333 
334 /*
335  * Third element (16 bytes) of atomic wqe
336  */
337 struct rdma_sq_atomic_wqe_3rd
338 {
339 	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
340 	struct regpair swap_data /* Swap or add data */;
341 };
342 
343 
344 struct rdma_sq_bind_wqe
345 {
346 	struct regpair addr;
347 	__le32 l_key;
348 	u8 req_type /* Type of WQE */;
349 	u8 flags;
350 #define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
351 #define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
357 #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
358 #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
359 #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
360 #define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
361 #define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
362 	u8 wqe_size /* Size of WQE in 16B chunks */;
363 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
364 	u8 bind_ctrl;
365 #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1 /* zero based indication */
366 #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
367 #define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1 /*  (use enum rdma_mw_type) */
368 #define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
369 #define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
370 #define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
371 	u8 access_ctrl;
372 #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
373 #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
374 #define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
375 #define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
376 #define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
377 #define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
378 #define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
379 #define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
380 #define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
381 #define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
382 #define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
383 #define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
384 	u8 reserved3;
385 	u8 length_hi /* upper 8 bits of the registered MW length */;
386 	__le32 length_lo /* lower 32 bits of the registered MW length */;
387 	__le32 parent_l_key /* l_key of the parent MR */;
388 	__le32 reserved4;
389 };
390 
391 
392 /*
393  * First element (16 bytes) of bind wqe
394  */
395 struct rdma_sq_bind_wqe_1st
396 {
397 	struct regpair addr;
398 	__le32 l_key;
399 	u8 req_type /* Type of WQE */;
400 	u8 flags;
401 #define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
402 #define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1 /* Don't care for bind wqe */
408 #define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
409 #define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
410 #define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
411 #define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
412 #define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
413 	u8 wqe_size /* Size of WQE in 16B chunks */;
414 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
415 };
416 
417 
418 /*
419  * Second element (16 bytes) of bind wqe
420  */
421 struct rdma_sq_bind_wqe_2nd
422 {
423 	u8 bind_ctrl;
424 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
425 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
426 #define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1 /*  (use enum rdma_mw_type) */
427 #define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
428 #define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
429 #define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
430 	u8 access_ctrl;
431 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
432 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
433 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
434 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
435 #define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
436 #define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
437 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
438 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
439 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
440 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
441 #define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
442 #define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
443 	u8 reserved3;
444 	u8 length_hi /* upper 8 bits of the registered MW length */;
445 	__le32 length_lo /* lower 32 bits of the registered MW length */;
446 	__le32 parent_l_key /* l_key of the parent MR */;
447 	__le32 reserved4;
448 };
449 
450 
451 /*
452  * Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
453  */
454 struct rdma_sq_common_wqe
455 {
456 	__le32 reserved1[3];
457 	u8 req_type /* Type of WQE */;
458 	u8 flags;
459 #define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
460 #define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE (only relevant for SENDs and RDMA writes with immediate) */
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1 /* If set, indicates inline data follows this WQE instead of SGEs (only relevant for SENDs and RDMA writes) */
468 #define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
469 #define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
470 #define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to the number of SGEs + 1. With inline data: set to the number of whole 16B chunks containing the inline data, plus 1. */;
472 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
473 };
474 
475 
476 struct rdma_sq_fmr_wqe
477 {
478 	struct regpair addr;
479 	__le32 l_key;
480 	u8 req_type /* Type of WQE */;
481 	u8 flags;
482 #define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1 /* If set, completion will be generated when the WQE is completed */
483 #define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
493 #define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
494 #define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
495 #define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
496 	u8 wqe_size /* Size of WQE in 16B chunks */;
497 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
498 	u8 fmr_ctrl;
499 #define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F /* 0 is 4k, 1 is 8k... */
500 #define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
501 #define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1 /* zero based indication */
502 #define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
503 #define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1 /* indication whether bind is enabled for this MR */
504 #define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
505 #define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
506 #define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
507 	u8 access_ctrl;
508 #define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
509 #define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
510 #define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
511 #define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
512 #define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
513 #define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
514 #define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
515 #define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
516 #define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
517 #define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
518 #define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
519 #define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
520 	u8 reserved3;
521 	u8 length_hi /* upper 8 bits of the registered MR length */;
522 	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
523 	struct regpair pbl_addr /* Address of PBL */;
524 	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
525 	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
526 	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
527 	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
528 	__le16 dif_flags;
529 #define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
530 #define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
531 #define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
532 #define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the dif_runt_crc_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
534 #define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
535 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
536 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
537 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
538 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
539 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
540 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
542 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
543 #define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK    0x1 /* In RX IO, Ref Tag will remain at constant value of dif_base_ref_tag */
544 #define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT   7
545 #define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0xFF
546 #define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              8
	__le32 reserved5;
548 };
549 
550 
551 /*
552  * First element (16 bytes) of fmr wqe
553  */
554 struct rdma_sq_fmr_wqe_1st
555 {
556 	struct regpair addr;
557 	__le32 l_key;
558 	u8 req_type /* Type of WQE */;
559 	u8 flags;
560 #define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
561 #define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1 /* Don't care for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
571 #define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
572 #define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
573 #define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
574 	u8 wqe_size /* Size of WQE in 16B chunks */;
575 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
576 };
577 
578 
579 /*
580  * Second element (16 bytes) of fmr wqe
581  */
582 struct rdma_sq_fmr_wqe_2nd
583 {
584 	u8 fmr_ctrl;
585 #define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F /* 0 is 4k, 1 is 8k... */
586 #define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
587 #define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
588 #define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
589 #define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1 /* indication whether bind is enabled for this MR */
590 #define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
591 #define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
592 #define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
593 	u8 access_ctrl;
594 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
595 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
596 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
597 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
598 #define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
599 #define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
600 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
601 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
602 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
603 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
604 #define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
605 #define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
606 	u8 reserved3;
607 	u8 length_hi /* upper 8 bits of the registered MR length */;
608 	__le32 length_lo /* lower 32 bits of the registered MR length. */;
609 	struct regpair pbl_addr /* Address of PBL */;
610 };
611 
612 
613 /*
614  * Third element (16 bytes) of fmr wqe
615  */
616 struct rdma_sq_fmr_wqe_3rd
617 {
618 	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
619 	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
620 	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
621 	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
622 	__le16 dif_flags;
623 #define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
624 #define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
625 #define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
626 #define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the dif_runt_crc_value field is valid. In RX IO, indicates the calculated runt value is to be placed on the host buffer. */
628 #define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
629 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
630 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
631 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
632 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
633 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
634 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x0000 1=0xFFFF (use enum rdma_dif_crc_seed) */
636 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
637 #define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
638 #define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
	__le32 reserved5;
640 };
641 
642 
643 struct rdma_sq_local_inv_wqe
644 {
645 	struct regpair reserved;
646 	__le32 inv_l_key /* The invalidate local key */;
647 	u8 req_type /* Type of WQE */;
648 	u8 flags;
649 #define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
650 #define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1 /* Don't care for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
660 #define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
661 #define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
662 #define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
663 	u8 wqe_size /* Size of WQE in 16B chunks */;
664 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
665 };
666 
667 
668 struct rdma_sq_rdma_wqe
669 {
670 	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
671 	__le32 length /* Total data length. If DIF on host is enabled, length does NOT include DIF guards. */;
672 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
673 	u8 req_type /* Type of WQE */;
674 	u8 flags;
675 #define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1 /* If set, completion will be generated when the WQE is completed */
676 #define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1 /* If set, indicates inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK              0x1 /* If set, indicates a read-with-invalidate WQE (iWARP only) */
688 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT             6
689 #define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x1
690 #define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                7
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to the number of SGEs + 1. With inline data: set to the number of whole 16B chunks containing the inline data, plus 1. */;
692 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
693 	struct regpair remote_va /* Remote virtual address */;
694 	__le32 r_key /* Remote key */;
695 	u8 dif_flags;
696 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
697 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
698 #define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first RDMA on related IO. */
699 #define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
700 #define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last RDMA on related IO. */
701 #define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
702 #define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
703 #define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
704 	u8 reserved2[3];
705 };
706 
707 
708 /*
709  * First element (16 bytes) of rdma wqe
710  */
711 struct rdma_sq_rdma_wqe_1st
712 {
713 	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
714 	__le32 length /* Total data length */;
715 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
716 	u8 req_type /* Type of WQE */;
717 	u8 flags;
718 #define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
719 #define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1 /* If set, indicates inline data follows this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates the host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1 /* If set, indicates a read-with-invalidate WQE (iWARP only) */
731 #define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
732 #define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
733 #define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to the number of SGEs + 1. With inline data: set to the number of whole 16B chunks containing the inline data, plus 1. */;
735 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
736 };
737 
738 
739 /*
740  * Second element (16 bytes) of rdma wqe
741  */
742 struct rdma_sq_rdma_wqe_2nd
743 {
744 	struct regpair remote_va /* Remote virtual address */;
745 	__le32 r_key /* Remote key */;
746 	u8 dif_flags;
747 #define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
748 #define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
749 #define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first DIF on related MR. */
750 #define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
751 #define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last DIF on related MR. */
752 #define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
753 #define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
754 #define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
755 	u8 reserved2[3];
756 };
757 
758 
759 /*
760  * SQ WQE req type enumeration
761  */
762 enum rdma_sq_req_type
763 {
764 	RDMA_SQ_REQ_TYPE_SEND,
765 	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
766 	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
767 	RDMA_SQ_REQ_TYPE_RDMA_WR,
768 	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
769 	RDMA_SQ_REQ_TYPE_RDMA_RD,
770 	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
771 	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
772 	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
773 	RDMA_SQ_REQ_TYPE_FAST_MR,
774 	RDMA_SQ_REQ_TYPE_BIND,
775 	RDMA_SQ_REQ_TYPE_INVALID,
776 	MAX_RDMA_SQ_REQ_TYPE
777 };
778 
779 
780 struct rdma_sq_send_wqe
781 {
782 	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
783 	__le32 length /* Total data length */;
784 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
785 	u8 req_type /* Type of WQE */;
786 	u8 flags;
787 #define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
788 #define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1 /* If set, indicates inline data follows this WQE instead of SGEs */
796 #define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
797 #define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for send wqe */
798 #define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
799 #define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
800 #define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to the number of SGEs + 1. With inline data: set to the number of whole 16B chunks containing the inline data, plus 1. */;
802 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
803 	__le32 reserved1[4];
804 };
805 
806 
807 struct rdma_sq_send_wqe_1st
808 {
809 	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
810 	__le32 length /* Total data length */;
811 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
812 	u8 req_type /* Type of WQE */;
813 	u8 flags;
814 #define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
815 #define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before processing of this WQE begins */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before processing of this WQE begins */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1 /* If set, indicates inline data follows this WQE instead of SGEs */
823 #define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
824 #define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
825 #define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks, including all SGEs or inline data. With SGEs: set to the number of SGEs + 1. With inline data: set to the number of whole 16B chunks containing the inline data, plus 1. */;
827 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
828 };
829 
830 
/*
 * Second element (16 bytes) of send wqe
 */
struct rdma_sq_send_wqe_2st
832 {
833 	__le32 reserved1[4];
834 };
835 
836 
837 struct rdma_sq_sge
838 {
839 	__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
840 	struct regpair addr;
841 	__le32 l_key;
842 };
843 
844 
845 struct rdma_srq_wqe_header
846 {
847 	struct regpair wr_id;
848 	u8 num_sges /* number of SGEs in WQE */;
849 	u8 reserved2[7];
850 };
851 
852 struct rdma_srq_sge
853 {
854 	struct regpair addr;
855 	__le32 length;
856 	__le32 l_key;
857 };
858 
859 /*
860  * rdma srq sge
861  */
862 union rdma_srq_elm
863 {
864 	struct rdma_srq_wqe_header header;
865 	struct rdma_srq_sge sge;
866 };
867 
868 
869 
870 
871 /*
872  * Rdma doorbell data for flags update
873  */
874 struct rdma_pwm_flags_data
875 {
876 	__le16 icid /* internal CID */;
877 	u8 agg_flags /* aggregative flags */;
878 	u8 reserved;
879 };
880 
881 
882 /*
883  * Rdma doorbell data for SQ and RQ
884  */
885 struct rdma_pwm_val16_data
886 {
887 	__le16 icid /* internal CID */;
888 	__le16 value /* aggregated value to update */;
889 };
890 
891 
892 union rdma_pwm_val16_data_union
893 {
894 	struct rdma_pwm_val16_data as_struct /* Parameters field */;
895 	__le32 as_dword;
896 };
897 
898 
899 /*
900  * Rdma doorbell data for CQ
901  */
902 struct rdma_pwm_val32_data
903 {
904 	__le16 icid /* internal CID */;
	u8 agg_flags /* One bit for every DQ counter flag in the CM context that the DQ can increment */;
906 	u8 params;
907 #define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK             0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
908 #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT            0
909 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK           0x1 /* enable QM bypass */
910 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT          2
911 #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
912 #define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK         0x1 /* Indicates the 16b value should be updated. Should be set when conn_type_is_iwarp is set */
914 #define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT        4
915 #define RDMA_PWM_VAL32_DATA_RESERVED_MASK            0x7
916 #define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT           5
917 	__le32 value /* aggregated value to update */;
918 };
919 
920 
921 union rdma_pwm_val32_data_union
922 {
923 	struct rdma_pwm_val32_data as_struct /* Parameters field */;
924 	struct regpair as_repair;
925 };
926 
927 #endif /* __QED_HSI_RDMA__ */
928