/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2014 Pluribus Networks Inc.
 * Copyright 2019 Joyent, Inc.
 */

#ifndef _COMPAT_FREEBSD_AMD64_MACHINE_ATOMIC_H_
#define	_COMPAT_FREEBSD_AMD64_MACHINE_ATOMIC_H_

static __inline u_short
atomic_load_acq_short(volatile u_short *p)
{
	u_short res;

	res = *p;
	__asm volatile("" : : : "memory");

	return (res);
}

static __inline u_int
atomic_load_acq_int(volatile u_int *p)
{
	u_int res;

	res = *p;
	__asm volatile("" : : : "memory");

	return (res);
}

static __inline u_long
atomic_load_acq_long(volatile u_long *p)
{
	u_long res;

	res = *p;
	__asm volatile("" : : : "memory");

	return (res);
}
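
/*
 * Note: on amd64 every ordinary load already has acquire semantics
 * under the TSO memory model, so the *_load_acq_* routines above only
 * need a compiler barrier, not a fence instruction.  Consumer-side
 * sketch (the 'flag' and 'data' names are hypothetical):
 *
 *	while (atomic_load_acq_int(&flag) == 0)
 *		;
 *	consume(data);	// cannot be reordered before the flag load
 */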

static __inline void
atomic_store_rel_int(volatile u_int *p, u_int v)
{
	__asm volatile("" : : : "memory");
	*p = v;
}

static __inline void
atomic_store_rel_long(volatile u_long *p, u_long v)
{
	__asm volatile("" : : : "memory");
	*p = v;
}
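
/*
 * Likewise, amd64 stores are not reordered with earlier loads or
 * stores, so release semantics require only the compiler barrier that
 * precedes the plain store above.  Producer-side sketch (hypothetical
 * names, pairing with the acquire load above):
 *
 *	data = value;			// publish the payload first
 *	atomic_store_rel_int(&flag, 1);	// then release the flag
 */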

/*
 * Atomic compare and set.
 *
 * if (*dst == expect) *dst = src (all 32 bit words)
 *
 * Returns 0 on failure, non-zero on success
 */
static __inline int
atomic_cmpset_int(volatile u_int *dst, u_int expect, u_int src)
{
	u_char res;

	__asm __volatile(
	"	lock ;			"
	"	cmpxchgl %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}

static __inline int
atomic_cmpset_long(volatile u_long *dst, u_long expect, u_long src)
{
	u_char res;

	__asm __volatile(
	"	lock ;			"
	"	cmpxchgq %3,%1 ;	"
	"	sete	%0 ;		"
	"# atomic_cmpset_long"
	: "=q" (res),			/* 0 */
	  "+m" (*dst),			/* 1 */
	  "+a" (expect)			/* 2 */
	: "r" (src)			/* 3 */
	: "memory", "cc");
	return (res);
}
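
/*
 * Typical compare-and-set retry loop (a sketch only; 'counter' is a
 * hypothetical caller-owned variable):
 *
 *	u_int old, new;
 *	do {
 *		old = *counter;
 *		new = old + increment;
 *	} while (atomic_cmpset_int(counter, old, new) == 0);
 */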

static __inline int
atomic_testandset_int(volatile u_int *p, u_int v)
{
	u_char res;

	__asm __volatile(
	"	lock ;			"
	"	btsl	%2,%1 ;		"
	"	setc	%0 ;		"
	"# atomic_testandset_int"
	: "=q" (res),			/* 0 */
	  "+m" (*p)			/* 1 */
	: "Ir" (v & 0x1f)		/* 2 */
	: "cc");
	return (res);
}
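
/*
 * btsl operates on bit (v % 32) of *p and the carry flag receives the
 * bit's previous value, so the return is non-zero iff the bit was
 * already set.  Minimal try-lock sketch ('lockword' is hypothetical):
 *
 *	if (atomic_testandset_int(&lockword, 0) == 0) {
 *		// bit was clear; this caller now holds it
 *	}
 */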
126
127/*
128 * Atomically add the value of v to the integer pointed to by p and return
129 * the previous value of *p.
130 */
131static __inline u_int
132atomic_fetchadd_int(volatile u_int *p, u_int v)
133{
134
135	__asm __volatile(
136	"	lock ;			"
137	"	xaddl	%0, %1 ;	"
138	"# atomic_fetchadd_int"
139	: "+r" (v),			/* 0 (result) */
140	  "=m" (*p)			/* 1 */
141	: "m" (*p)			/* 2 */
142	: "cc");
143	return (v);
144}
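
/*
 * xaddl exchanges and adds in a single locked instruction, so each
 * caller gets a unique pre-increment snapshot.  Ticket-style sketch
 * (hypothetical 'next_id' counter):
 *
 *	u_int id = atomic_fetchadd_int(&next_id, 1);
 */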

static __inline void
atomic_set_int(volatile u_int *p, u_int v)
{
	__asm volatile(
	"lock ; " "orl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc");
}

static __inline void
atomic_clear_int(volatile u_int *p, u_int v)
{
	__asm volatile(
	"lock ; " "andl %1,%0"
	: "+m" (*p)
	: "ir" (~v)
	: "cc");
}

static __inline void
atomic_subtract_int(volatile u_int *p, u_int v)
{
	__asm volatile(
	"lock ; " "subl %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc");
}

static __inline void
atomic_set_long(volatile u_long *p, u_long v)
{
	__asm volatile(
	"lock ; " "orq %1,%0"
	: "+m" (*p)
	: "ir" (v)
	: "cc");
}

static __inline void
atomic_clear_long(volatile u_long *p, u_long v)
{
	__asm volatile(
	"lock ; " "andq %1,%0"
	: "+m" (*p)
	: "ir" (~v)
	: "cc");
}
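
/*
 * The set/clear pairs above are atomic bitwise OR and AND-NOT, the
 * usual primitives for flag words.  Sketch (hypothetical 'flags'
 * word and FLAG_BUSY bit):
 *
 *	atomic_set_int(&flags, FLAG_BUSY);	// flags |= FLAG_BUSY
 *	atomic_clear_int(&flags, FLAG_BUSY);	// flags &= ~FLAG_BUSY
 */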

static __inline u_int
atomic_swap_int(volatile u_int *p, u_int v)
{

	__asm __volatile(
	"	xchgl	%1,%0 ;		"
	"# atomic_swap_int"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}

static __inline u_long
atomic_swap_long(volatile u_long *p, u_long v)
{

	__asm __volatile(
	"	xchgq	%1,%0 ;		"
	"# atomic_swap_long"
	: "+r" (v),			/* 0 */
	  "+m" (*p));			/* 1 */
	return (v);
}
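
/*
 * xchg with a memory operand is implicitly locked on x86, which is
 * why no lock prefix appears above.  Hand-off sketch (hypothetical
 * 'slot' variable):
 *
 *	u_int prev = atomic_swap_int(&slot, new_val);
 */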

#define	atomic_store_short(p, v)	\
	    (*(volatile u_short *)(p) = (u_short)(v))
#define	atomic_store_int(p, v)		\
	    (*(volatile u_int *)(p) = (u_int)(v))

#define	atomic_readandclear_int(p)	atomic_swap_int(p, 0)
#define	atomic_readandclear_long(p)	atomic_swap_long(p, 0)
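
/*
 * Read-and-clear is simply an atomic swap with zero: the caller gets
 * the old contents and the word is left cleared in one step, e.g. for
 * draining a pending-events mask (hypothetical 'pending' word):
 *
 *	u_int events = atomic_readandclear_int(&pending);
 */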

/* Operations on 32-bit double words. */
#define	atomic_load_acq_32	atomic_load_acq_int
#define	atomic_store_rel_32	atomic_store_rel_int
#define	atomic_cmpset_32	atomic_cmpset_int

/* Operations on 64-bit quad words. */
#define	atomic_cmpset_64	atomic_cmpset_long
#define	atomic_readandclear_64	atomic_readandclear_long

/* Operations on pointers. */
#define	atomic_cmpset_ptr	atomic_cmpset_long
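
/*
 * On amd64, u_long and pointers are both 64 bits wide, so the 64-bit
 * and pointer aliases can safely share the u_long implementations.
 */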

/* Needed for the membar functions */
#include_next <sys/atomic.h>

static __inline void
atomic_thread_fence_rel(void)
{
	/* Equivalent to FreeBSD's __compiler_membar() */
	__asm __volatile(" " : : : "memory");
}

static __inline void
atomic_thread_fence_seq_cst(void)
{
	/* Equivalent to FreeBSD's !KERNEL storeload_barrier() */
	__asm __volatile("lock; addl $0,-8(%%rsp)" : : : "memory", "cc");
}
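
/*
 * Under TSO the only hardware reordering is store-load, so only the
 * seq_cst fence needs a real instruction; a locked add to a stack
 * slot is a full barrier that is typically cheaper than mfence.
 */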

#define	mb()			membar_enter()
#define	rmb()			membar_consumer()
#define	wmb()			membar_producer()

#endif	/* _COMPAT_FREEBSD_AMD64_MACHINE_ATOMIC_H_ */