@@ -55,40 +55,47 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
} \
__LL_SC_EXPORT(atomic_##op);

- #define ATOMIC_OP_RETURN(op, asm_op) \
+ #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
- __LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
+ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
unsigned long tmp; \
int result; \
\
- asm volatile("// atomic_" #op "_return\n" \
+ asm volatile("// atomic_" #op "_return" #name "\n" \
" prfm pstl1strm, %2\n" \
- "1: ldxr %w0, %2\n" \
+ "1: ld" #acq "xr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
- " stlxr %w1, %w0, %2\n" \
- " cbnz %w1, 1b" \
+ " st" #rel "xr %w1, %w0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
- : "memory"); \
+ : cl); \
\
- smp_mb(); \
return result; \
} \
- __LL_SC_EXPORT(atomic_##op##_return);
+ __LL_SC_EXPORT(atomic_##op##_return##name);
+
+ #define ATOMIC_OPS(...) \
+ ATOMIC_OP(__VA_ARGS__) \
+ ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)

- #define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN(op, asm_op)
+ #define ATOMIC_OPS_RLX(...) \
+ ATOMIC_OPS(__VA_ARGS__) \
+ ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
+ ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
+ ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)

- ATOMIC_OPS(add, add)
- ATOMIC_OPS(sub, sub)
+ ATOMIC_OPS_RLX(add, add)
+ ATOMIC_OPS_RLX(sub, sub)

ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

+ #undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -111,40 +118,47 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
} \
__LL_SC_EXPORT(atomic64_##op);

- #define ATOMIC64_OP_RETURN(op, asm_op) \
+ #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
- __LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
+ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
{ \
long result; \
unsigned long tmp; \
\
- asm volatile("// atomic64_" #op "_return\n" \
+ asm volatile("// atomic64_" #op "_return" #name "\n" \
" prfm pstl1strm, %2\n" \
- "1: ldxr %0, %2\n" \
+ "1: ld" #acq "xr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
- " stlxr %w1, %0, %2\n" \
- " cbnz %w1, 1b" \
+ " st" #rel "xr %w1, %0, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
- : "memory"); \
+ : cl); \
\
- smp_mb(); \
return result; \
} \
- __LL_SC_EXPORT(atomic64_##op##_return);
+ __LL_SC_EXPORT(atomic64_##op##_return##name);
+
+ #define ATOMIC64_OPS(...) \
+ ATOMIC64_OP(__VA_ARGS__) \
+ ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__)

- #define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_OP_RETURN(op, asm_op)
+ #define ATOMIC64_OPS_RLX(...) \
+ ATOMIC64_OPS(__VA_ARGS__) \
+ ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
+ ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
+ ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__)

- ATOMIC64_OPS(add, add)
- ATOMIC64_OPS(sub, sub)
+ ATOMIC64_OPS_RLX(add, add)
+ ATOMIC64_OPS_RLX(sub, sub)

ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

+ #undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
@@ -172,7 +186,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

- #define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \
+ #define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
__LL_SC_INLINE unsigned long \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
unsigned long old, \
@@ -182,7 +196,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
\
asm volatile( \
" prfm pstl1strm, %[v]\n" \
- "1: ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
+ "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
@@ -199,14 +213,22 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name);

- __CMPXCHG_CASE(w, b, 1, , , )
- __CMPXCHG_CASE(w, h, 2, , , )
- __CMPXCHG_CASE(w, , 4, , , )
- __CMPXCHG_CASE( , , 8, , , )
- __CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
- __CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
- __CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory")
- __CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
+ __CMPXCHG_CASE(w, b, 1, , , , )
+ __CMPXCHG_CASE(w, h, 2, , , , )
+ __CMPXCHG_CASE(w, , 4, , , , )
+ __CMPXCHG_CASE( , , 8, , , , )
+ __CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
+ __CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
+ __CMPXCHG_CASE(w, , acq_4, , a, , "memory")
+ __CMPXCHG_CASE( , , acq_8, , a, , "memory")
+ __CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
+ __CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
+ __CMPXCHG_CASE(w, , rel_4, , , l, "memory")
+ __CMPXCHG_CASE( , , rel_8, , , l, "memory")
+ __CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
+ __CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
+ __CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
+ __CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")

#undef __CMPXCHG_CASE
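
Note (not part of the diff): as a rough illustration of what the new macro plumbing generates, hand-expanding ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add) gives approximately the function below. This is a sketch only; the real preprocessor output is additionally wrapped by __LL_SC_INLINE, __LL_SC_PREFIX and __LL_SC_EXPORT as shown in the first hunk, and "static inline" stands in for those wrappers here.

/* Illustrative hand-expansion of ATOMIC_OP_RETURN(_acquire, , a, , "memory", add, add).
 * acq="a" turns ldxr into ldaxr (load-acquire exclusive), rel is empty so the
 * store stays a plain stxr, mb is empty so no trailing dmb is emitted, and the
 * clobber list is "memory". */
static inline int atomic_add_return_acquire(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return_acquire\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"	"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	return result;
}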