@@ -250,15 +250,23 @@ impl<T> RingBuffer<T> {
 unsafe fn abandon<T>(buffer: NonNull<RingBuffer<T>>) {
     // The "store" part of `fetch_or()` has to use `Release` to make sure that any previous writes
     // to the ring buffer happen before it (in the thread that abandons first).
-    // The "load" part has to use `Acquire` to make sure that reading `head` and `tail`
-    // in the destructor happens after it (in the thread that drops the `RingBuffer`).
     if buffer
         .as_ref()
         .is_abandoned
-        .fetch_or(true, Ordering::AcqRel)
+        .fetch_or(true, Ordering::Release)
     {
-        // The flag was already set, i.e. the other thread has already abandoned
-        // the RingBuffer and it can be dropped now.
+        // The flag was already set, i.e. the other thread has already abandoned the RingBuffer
+        // and it can be dropped now.
+
+        // However, since the load of `is_abandoned` was `Relaxed`,
+        // we have to use `Acquire` here to make sure that reading `head` and `tail`
+        // in the destructor happens after this point.
+
+        // Ideally, we would use a memory fence like this:
+        //core::sync::atomic::fence(Ordering::Acquire);
+        // ... but as long as ThreadSanitizer doesn't support fences,
+        // we use load(Acquire) as a work-around to avoid false positives:
+        let _ = buffer.as_ref().is_abandoned.load(Ordering::Acquire);
         drop_slow(buffer);
     } else {
         // The flag wasn't set before, so we are the first to abandon the RingBuffer
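For context, here is a minimal self-contained sketch of the pattern this commit settles on. The `Shared` struct and `abandon` function names are hypothetical, not the crate's actual API: the `Release` on `fetch_or` publishes this thread's earlier buffer writes, and whichever owner loses the race pairs it with an `Acquire` load before reading the other owner's writes, standing in for an `Acquire` fence that ThreadSanitizer cannot model.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical stand-in for the shared ring-buffer state;
// only the abandonment flag matters for this sketch.
struct Shared {
    is_abandoned: AtomicBool,
}

/// Returns `true` if the caller is the second (last) owner to abandon
/// the shared state and is therefore responsible for dropping it.
fn abandon(shared: &Shared) -> bool {
    // `Release` orders all of this thread's earlier writes before the
    // flag becomes visible to the other owner.
    if shared.is_abandoned.fetch_or(true, Ordering::Release) {
        // The flag was already set, so the other owner abandoned first.
        // The load half of the `fetch_or` above is effectively `Relaxed`,
        // so synchronize with the other owner's `Release` store before
        // touching anything it wrote. `atomic::fence(Ordering::Acquire)`
        // would do the same, but ThreadSanitizer doesn't support fences,
        // hence the `Acquire` load as a work-around.
        let _ = shared.is_abandoned.load(Ordering::Acquire);
        true
    } else {
        false
    }
}

fn main() {
    let shared = Shared {
        is_abandoned: AtomicBool::new(false),
    };
    assert!(!abandon(&shared)); // first to abandon: the other side cleans up
    assert!(abandon(&shared)); // second to abandon: we clean up
}
```

The apparent rationale for downgrading `AcqRel` to `Release`, judging from the comments in the diff, is that the `Acquire` half is only needed on the path where the flag was already set; moving it into a conditional load means the first thread to abandon pays only for the `Release` store.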