#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

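/*
 * ADDR turns the untyped 'addr' argument of the helpers below into a
 * volatile long lvalue, so it can be used directly as the memory operand
 * of the inline assembly.
 */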
#define ADDR (*(volatile long *) addr)

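/*
 * Atomically set, clear, or complement bit 'nr' of the bitmap at 'addr'.
 * The lock prefix makes each operation atomic with respect to all other
 * CPUs, and therefore with respect to Xen and to other guests sharing
 * the memory.
 */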
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

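/*
 * The test-and-modify variants additionally return the previous value of
 * the bit.  bts/btr/btc copy the old bit into the carry flag, and the
 * following 'sbbl %0,%0' turns CF into 0 (bit was clear) or -1 (bit was
 * set), so any non-zero return value means the bit was previously set.
 */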
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

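/*
 * Locked compare-and-exchange on a 1-, 2-, 4- (or, on x86-64, 8-) byte
 * object.  synch_cmpxchg(ptr, old, new) atomically stores 'new' in *ptr
 * if *ptr still equals 'old', and always returns the value *ptr held
 * before the operation.  The __synch_xg() cast gives the void pointer a
 * (deliberately large) object type so it can be dereferenced as an "m"
 * operand, which also keeps gcc from assuming only a small region around
 * 'ptr' is accessed.  __synch_cmpxchg() dispatches on sizeof(*(ptr)) to
 * pick the matching cmpxchg instruction; unsupported sizes fall through
 * and return 'old' unchanged.
 */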
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#ifdef CONFIG_X86_64
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__("lock; cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#else
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#endif
	}
	return old;
}

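/*
 * Bit tests need no lock prefix: a single aligned read is already atomic
 * on x86.  synch_const_test_bit() is a plain C test used when 'nr' is
 * known at compile time; synch_var_test_bit() handles the general case
 * with btl followed by the same sbbl trick as above.
 */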
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

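/* Dispatch to the constant or variable version at compile time. */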
#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))

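/*
 * Illustrative sketch only (not part of the original interface): one way a
 * guest might use these primitives against a bitmap shared with Xen or
 * another domain.  The 'pending' array and 'port' index are hypothetical
 * names, not defined by this file.
 *
 *     static unsigned long pending[2];            // e.g. a shared-page bitmap
 *
 *     void raise(int port)
 *     {
 *         synch_set_bit(port, pending);           // atomically mark pending
 *     }
 *
 *     void drain(int port)
 *     {
 *         // Non-zero only for the caller that actually cleared the bit,
 *         // so each event is consumed exactly once.
 *         if ( synch_test_and_clear_bit(port, pending) )
 *             ; // handle_event(port);
 *     }
 *
 *     // Compare-and-swap on a shared word, e.g. claim it only if still free:
 *     //     if ( synch_cmpxchg(&pending[0], 0UL, 1UL) == 0UL )
 *     //         ... this caller won the race ...
 */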
#endif /* __XEN_SYNCH_BITOPS_H__ */