/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc.  All rights reserved.
 */

#ifndef _RDSV3_IMPL_H
#define	_RDSV3_IMPL_H

#include <sys/atomic.h>

/*
 * This file is only present in Solaris
 */

#ifdef __cplusplus
extern "C" {
#endif

extern dev_info_t	*rdsv3_dev_info;

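/* Aliases for fields carried in network (big endian) byte order. */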
#define	uint16_be_t	uint16_t
#define	uint32_be_t	uint32_t
#define	uint64_be_t	uint64_t

/*
 * RDS Well known service id
 * Format: 0x1h00144Fhhhhhhhh
 *         "00144F" is the Sun OUI
 * 'h' can be any hexadecimal digit.
 */
#define	RDS_SERVICE_ID		0x1000144F00000001ULL

/*
 * Atomic operations
 */
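/*
 * Map the Linux atomic_t idioms used by the common RDS code onto the
 * Solaris atomic_ops(9F) routines.
 */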
typedef unsigned int	atomic_t;
#define	ATOMIC_INIT(a)	a

#define	atomic_get(p)	(*(p))

#define	atomic_cmpset_long(p, c, n) \
	((c == atomic_cas_uint(p, c, n)) ? c : -1)

#define	atomic_dec_and_test(a)			\
	(atomic_dec_uint_nv((a)) == 0)

#define	atomic_cmpxchg(a, o, n)			\
	atomic_cas_uint(a, o, n)

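/*
 * Linux-style bit operations.  The bit number indexes an array of longs:
 * 64-bit words on _LP64 kernels, 32-bit words otherwise.
 */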
#ifdef _LP64
#define	set_bit(b, p) \
	atomic_or_ulong(((volatile ulong_t *)(void *)(p)) + ((b) >> 6), \
	1ul << ((b) & 0x3f))

#define	clear_bit(b, p) \
	atomic_and_ulong(((volatile ulong_t *)(void *)(p)) + ((b) >> 6), \
	~(1ul << ((b) & 0x3f)))

#define	test_bit(b, p) \
	(((volatile ulong_t *)(void *)(p))[(b) >> 6] & (1ul << ((b) & 0x3f)))

#define	test_and_set_bit(b, p) \
	atomic_set_long_excl(((ulong_t *)(void *)(p)) +		\
	    ((b) >> 6), ((b) & 0x3f))
#define	test_and_clear_bit(b, p) \
	!atomic_clear_long_excl(((ulong_t *)(void *)(p)) + ((b) >> 6), \
	((b) & 0x3f))
#else
#define	set_bit(b, p) \
	atomic_or_uint(((volatile uint_t *)(void *)p) + (b >> 5), \
	1ul << (b & 0x1f))

#define	clear_bit(b, p) \
	atomic_and_uint(((volatile uint_t *)(void *)p) + (b >> 5), \
	~(1ul << (b & 0x1f)))

#define	test_bit(b, p) \
	(((volatile uint_t *)(void *)p)[b >> 5] & (1ul << (b & 0x1f)))

#define	test_and_set_bit(b, p) \
	atomic_set_long_excl(((ulong_t *)(void *)p) + (b >> 5), (b & 0x1f))
#define	test_and_clear_bit(b, p) \
	!atomic_clear_long_excl(((ulong_t *)(void *)p) + (b >> 5), (b & 0x1f))
#endif

/*
 * These macros and/or constants are used instead of Linux
 * generic_{test,__{clear,set}}_le_bit().
 */
#if defined(sparc)
#define	LE_BIT_XOR	((BITS_PER_LONG-1) & ~0x7)
#else
#define	LE_BIT_XOR	0
#endif

#define	set_le_bit(b, p)	set_bit(b ^ LE_BIT_XOR, p)
#define	clear_le_bit(b, p)	clear_bit(b ^ LE_BIT_XOR, p)
#define	test_le_bit(b, p)	test_bit(b ^ LE_BIT_XOR, p)

extern uint_t	rdsv3_one_sec_in_hz;

#define	jiffies	100
#define	HZ	(drv_hztousec(1))
/* setting this to PAGESIZE throws build errors */
#define	PAGE_SIZE	4096 /* xxx - fix this */
#define	BITS_PER_LONG	(sizeof (unsigned long) * 8)

/* debug */
#define	RDSV3_PANIC()		cmn_err(CE_PANIC, "Panic forced by RDSV3");

/* ERR */
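/*
 * Linux-style error pointers: a negative errno is encoded in the last
 * MAX_ERRNO values of the pointer range.  IS_ERR() detects such a pointer
 * and PTR_ERR() recovers the errno value.
 */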
#define	MAX_ERRNO	4095
#define	ERR_PTR(x)	((void *)(uintptr_t)x)
#define	IS_ERR(ptr)	(((uintptr_t)ptr) >= (uintptr_t)-MAX_ERRNO)
#define	PTR_ERR(ptr)	(int)(uintptr_t)ptr

#define	MAX_SCHEDULE_TIMEOUT	(~0UL>>1)

/* list */
/* copied and modified list_remove_node */
#define	list_remove_node(node)						\
	if ((node)->list_next != NULL) {				\
		(node)->list_prev->list_next = (node)->list_next;	\
		(node)->list_next->list_prev = (node)->list_prev;	\
		(node)->list_next = (node)->list_prev = NULL;		\
	}

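/* Re-initialize dst with src's layout, then move every node onto dst. */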
#define	list_splice(src, dst)	{				\
	list_create(dst, (src)->list_size, (src)->list_offset);	\
	list_move_tail(dst, src);				\
	}

#define	RDSV3_FOR_EACH_LIST_NODE(objp, listp, member)	\
	for (objp = list_head(listp); objp; objp = list_next(listp, objp))
#define	RDSV3_FOR_EACH_LIST_NODE_SAFE(objp, tmp, listp, member)	\
	for (objp = list_head(listp), tmp = (objp != NULL) ?	\
	    list_next(listp, objp) : NULL;			\
	    objp;						\
	    objp = tmp, tmp = (objp != NULL) ?			\
	    list_next(listp, objp) : NULL)

/* simulate wait_queue_head_t */
typedef struct rdsv3_wait_queue_s {
	kmutex_t	waitq_mutex;
	kcondvar_t	waitq_cv;
	uint_t		waitq_waiters;
} rdsv3_wait_queue_t;

#define	rdsv3_init_waitqueue(waitqp)					\
	mutex_init(&(waitqp)->waitq_mutex, NULL, MUTEX_DRIVER, NULL);	\
	cv_init(&(waitqp)->waitq_cv, NULL, CV_DRIVER, NULL);		\
	(waitqp)->waitq_waiters = 0

#define	rdsv3_exit_waitqueue(waitqp)					\
	ASSERT((waitqp)->waitq_waiters == 0);				\
	mutex_destroy(&(waitqp)->waitq_mutex);				\
	cv_destroy(&(waitqp)->waitq_cv)

#define	rdsv3_wake_up(waitqp)	{					\
	mutex_enter(&(waitqp)->waitq_mutex);				\
	if ((waitqp)->waitq_waiters)					\
		cv_signal(&(waitqp)->waitq_cv);				\
	mutex_exit(&(waitqp)->waitq_mutex);				\
	}

#define	rdsv3_wake_up_all(waitqp)	{				\
	mutex_enter(&(waitqp)->waitq_mutex);				\
	if ((waitqp)->waitq_waiters)					\
		cv_broadcast(&(waitqp)->waitq_cv);			\
	mutex_exit(&(waitqp)->waitq_mutex);				\
	}

/* analogous to cv_wait */
#define	rdsv3_wait_event(waitq, condition)				\
{									\
	mutex_enter(&(waitq)->waitq_mutex);				\
	(waitq)->waitq_waiters++;					\
	while (!(condition)) {						\
		cv_wait(&(waitq)->waitq_cv, &(waitq)->waitq_mutex);	\
	}								\
	(waitq)->waitq_waiters--;					\
	mutex_exit(&(waitq)->waitq_mutex);				\
}

/* analogous to cv_wait_sig */
#define	rdsv3_wait_sig(waitqp, condition)				\
(									\
{									\
	int cv_return = 1;						\
	mutex_enter(&(waitqp)->waitq_mutex);				\
	(waitqp)->waitq_waiters++;					\
	while (!(condition)) {						\
		cv_return = cv_wait_sig(&(waitqp)->waitq_cv,		\
		    &(waitqp)->waitq_mutex);				\
		if (cv_return == 0) {					\
			break;						\
		}							\
	}								\
	(waitqp)->waitq_waiters--;					\
	mutex_exit(&(waitqp)->waitq_mutex);				\
	cv_return;							\
}									\
)

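/* sk_flag bit: the socket has been orphaned (cf. the Linux SOCK_DEAD flag). */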
#define	SOCK_DEAD	1ul

/* socket */
typedef struct rsock {
	sock_upper_handle_t	sk_upper_handle;
	sock_upcalls_t		*sk_upcalls;

	kmutex_t		sk_lock;
	ulong_t			sk_flag;
	rdsv3_wait_queue_t	*sk_sleep; /* Also protected by rs_recv_lock */
	int			sk_sndbuf;
	int			sk_rcvbuf;
	atomic_t		sk_refcount;

	struct rdsv3_sock	*sk_protinfo;
} rsock_t;

typedef struct rdsv3_conn_info_s {
	uint32_be_t  c_laddr;
	uint32_be_t  c_faddr;
} rdsv3_conn_info_t;

/* WQ */
typedef struct rdsv3_workqueue_struct_s {
	kmutex_t wq_lock;
	uint_t	wq_state;
	int	wq_pending;
	list_t	wq_queue;
} rdsv3_workqueue_struct_t;

struct rdsv3_work_s;
typedef void (*rdsv3_work_func_t)(struct rdsv3_work_s *);
typedef struct rdsv3_work_s {
	list_node_t	work_item;
	rdsv3_work_func_t	func;
} rdsv3_work_t;

/* simulate delayed_work */
typedef struct rdsv3_delayed_work_s {
	kmutex_t		lock;
	rdsv3_work_t		work;
	timeout_id_t		timeid;
	rdsv3_workqueue_struct_t	*wq;
} rdsv3_delayed_work_t;

#define	RDSV3_INIT_WORK(wp, f)	(wp)->func = f
#define	RDSV3_INIT_DELAYED_WORK(dwp, f)				\
	(dwp)->work.func = f;					\
	mutex_init(&(dwp)->lock, NULL, MUTEX_DRIVER, NULL);	\
	(dwp)->timeid = 0

/* simulate scatterlist */
struct rdsv3_scatterlist {
	caddr_t		vaddr;
	uint_t		length;
	ibt_wr_ds_t	*sgl;
	ibt_mi_hdl_t	mihdl;
};
#define	rdsv3_sg_page(scat)	(scat)->vaddr
#define	rdsv3_sg_len(scat)	(scat)->length
#define	rdsv3_sg_set_page(scat, pg, len, off)		\
	(scat)->vaddr = (caddr_t)(pg + off);		\
	(scat)->length = len
#define	rdsv3_ib_sg_dma_len(dev, scat)	rdsv3_sg_len(scat)

/* copied from sys/socket.h */
#if defined(__sparc)
/* To maintain backward compatibility, alignment needs to be 8 on sparc. */
#define	_CMSG_HDR_ALIGNMENT	8
#else
/* for __i386 (and other future architectures) */
#define	_CMSG_HDR_ALIGNMENT	4
#endif	/* defined(__sparc) */

/*
 * The cmsg headers (and macros dealing with them) were made available as
 * part of UNIX95 and hence need to be protected with a _XPG4_2 define.
 */
#define	_CMSG_DATA_ALIGNMENT	(sizeof (int))
#define	_CMSG_HDR_ALIGN(x)	(((uintptr_t)(x) + _CMSG_HDR_ALIGNMENT - 1) & \
				    ~(_CMSG_HDR_ALIGNMENT - 1))
#define	_CMSG_DATA_ALIGN(x)	(((uintptr_t)(x) + _CMSG_DATA_ALIGNMENT - 1) & \
				    ~(_CMSG_DATA_ALIGNMENT - 1))
#define	CMSG_DATA(c)							\
	((unsigned char *)_CMSG_DATA_ALIGN((struct cmsghdr *)(c) + 1))

#define	CMSG_FIRSTHDR(m)						\
	(((m)->msg_controllen < sizeof (struct cmsghdr)) ?		\
	    (struct cmsghdr *)0 : (struct cmsghdr *)((m)->msg_control))

#define	CMSG_NXTHDR(m, c)						\
	(((c) == 0) ? CMSG_FIRSTHDR(m) :			\
	((((uintptr_t)_CMSG_HDR_ALIGN((char *)(c) +			\
	((struct cmsghdr *)(c))->cmsg_len) + sizeof (struct cmsghdr)) >	\
	(((uintptr_t)((struct msghdr *)(m))->msg_control) +		\
	((uintptr_t)((struct msghdr *)(m))->msg_controllen))) ?		\
	((struct cmsghdr *)0) :						\
	((struct cmsghdr *)_CMSG_HDR_ALIGN((char *)(c) +		\
	    ((struct cmsghdr *)(c))->cmsg_len))))

/* Amount of space + padding needed for a message of length l */
#define	CMSG_SPACE(l)							\
	((unsigned int)_CMSG_HDR_ALIGN(sizeof (struct cmsghdr) + (l)))

/* Value to be used in cmsg_len, does not include trailing padding */
#define	CMSG_LEN(l)							\
	((unsigned int)_CMSG_DATA_ALIGN(sizeof (struct cmsghdr)) + (l))

/* OFUV -> IB */
#define	RDSV3_IBDEV2HCAHDL(device)	(device)->hca_hdl
#define	RDSV3_QP2CHANHDL(qp)		(qp)->ibt_qp
#define	RDSV3_PD2PDHDL(pd)		(pd)->ibt_pd
#define	RDSV3_CQ2CQHDL(cq)		(cq)->ibt_cq

struct rdsv3_hdrs_mr {
	ibt_lkey_t	lkey;
	caddr_t		addr;
	size_t		size;
	ibt_mr_hdl_t	hdl;
};

/* rdsv3_impl.c */
void rdsv3_trans_init();
boolean_t rdsv3_capable_interface(struct lifreq *lifrp);
int rdsv3_do_ip_ioctl(ksocket_t so4, void **ipaddrs, int *size, int *nifs);
int rdsv3_do_ip_ioctl_old(ksocket_t so4, void **ipaddrs, int *size, int *nifs);
boolean_t rdsv3_isloopback(ipaddr_t addr);
void rdsv3_cancel_delayed_work(rdsv3_delayed_work_t *dwp);
void rdsv3_flush_workqueue(rdsv3_workqueue_struct_t *wq);
void rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp);
void rdsv3_queue_delayed_work(rdsv3_workqueue_struct_t *wq,
    rdsv3_delayed_work_t *dwp, uint_t delay);
struct rsock *rdsv3_sk_alloc();
void rdsv3_sock_init_data(struct rsock *sk);
void rdsv3_sock_exit_data(struct rsock *sk);
void rdsv3_destroy_task_workqueue(rdsv3_workqueue_struct_t *wq);
rdsv3_workqueue_struct_t *rdsv3_create_task_workqueue(char *name);
int rdsv3_conn_constructor(void *buf, void *arg, int kmflags);
void rdsv3_conn_destructor(void *buf, void *arg);
int rdsv3_conn_compare(const void *conn1, const void *conn2);
void rdsv3_loop_init();
int rdsv3_mr_compare(const void *mr1, const void *mr2);
int rdsv3_put_cmsg(struct nmsghdr *msg, int level, int type, size_t size,
    void *payload);
int rdsv3_verify_bind_address(ipaddr_t addr);
uint16_t rdsv3_ip_fast_csum(void *buffer, size_t length);
uint_t rdsv3_ib_dma_map_sg(struct ib_device *dev, struct rdsv3_scatterlist
	*scat, uint_t num);
void rdsv3_ib_dma_unmap_sg(ib_device_t *dev, struct rdsv3_scatterlist *scat,
    uint_t num);
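/* Take a reference on the socket. */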
static inline void
rdsv3_sk_sock_hold(struct rsock *sk)
{
	atomic_inc_32(&sk->sk_refcount);
}
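/* Drop a reference; tear down the socket state when the last one is released. */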
static inline void
rdsv3_sk_sock_put(struct rsock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcount))
		rdsv3_sock_exit_data(sk);
}
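/* Test a flag bit (e.g. SOCK_DEAD) in sk_flag. */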
static inline int
rdsv3_sk_sock_flag(struct rsock *sk, uint_t flag)
{
	return (test_bit(flag, &sk->sk_flag));
}
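/* Mark the socket as orphaned (dead). */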
static inline void
rdsv3_sk_sock_orphan(struct rsock *sk)
{
	set_bit(SOCK_DEAD, &sk->sk_flag);
}

#define	rdsv3_sndtimeo(a, b)	b ? 0 : 3600	/* check this value on linux */
#define	rdsv3_rcvtimeo(a, b)	b ? 0 : 3600	/* check this value on linux */

void rdsv3_ib_free_conn(void *arg);

#ifdef	__cplusplus
}
#endif

#endif /* _RDSV3_IMPL_H */