@@ -8,7 +8,7 @@ use crate::fmt;
88use crate :: ops:: Deref ;
99use crate :: panic:: { RefUnwindSafe , UnwindSafe } ;
1010use crate :: sys:: sync as sys;
11- use crate :: thread:: ThreadId ;
11+ use crate :: thread:: { current_id , ThreadId } ;
1212
1313/// A re-entrant mutual exclusion lock
1414///
@@ -108,66 +108,66 @@ cfg_if!(
108108 self . 0 . store( value, Relaxed ) ;
109109 }
110110 }
111- } else if #[ cfg( target_has_atomic = "32" ) ] {
112- use crate :: sync:: atomic:: { AtomicU32 , Ordering :: { Acquire , Relaxed , Release } } ;
111+ } else {
112+ /// Returns the address of a TLS variable. This is guaranteed to
113+ /// be unique across all currently alive threads.
114+ fn tls_addr( ) -> usize {
115+ thread_local! { static X : u8 = const { 0u8 } } ;
116+
117+ X . with( |p| <* const u8 >:: addr( p) )
118+ }
119+
120+ use crate :: sync:: atomic:: {
121+ AtomicUsize ,
122+ Ordering ,
123+ } ;
113124
114125 struct Tid {
115- seq: AtomicU32 ,
116- low: AtomicU32 ,
117- high: AtomicU32 ,
126+ // When a thread calls `set()`, this value gets updated to
127+ // the address of a thread local on that thread. This is
128+ // used as a first check in `contains()`; if the `tls_addr`
129+ // doesn't match the TLS address of the current thread, then
130+ // the ThreadId also can't match. Only if the TLS addresses do
131+ // match do we read out the actual TID.
132+ // Note also that we can use relaxed atomic operations here, because
133+ // we only ever read from the tid if `tls_addr` matches the current
134+ // TLS address. In that case, either the tid has been set by
135+ // the current thread, or by a thread that has terminated before
136+ // the current thread was created. In either case, no further
137+ // synchronization is needed (as per <https://github.com/rust-lang/miri/issues/3450>)
138+ tls_addr: AtomicUsize ,
139+ tid: UnsafeCell <u64 >,
118140 }
119141
142+ unsafe impl Send for Tid { }
143+ unsafe impl Sync for Tid { }
144+
120145 impl Tid {
121146 const fn new( ) -> Self {
122- Self {
123- seq: AtomicU32 :: new( 0 ) ,
124- low: AtomicU32 :: new( 0 ) ,
125- high: AtomicU32 :: new( 0 ) ,
126- }
147+ Self { tls_addr: AtomicUsize :: new( 0 ) , tid: UnsafeCell :: new( 0 ) }
127148 }
128149
129150 #[ inline]
130151 // NOTE: This assumes that `owner` is the ID of the current
131152 // thread, and may spuriously return `false` if that's not the case.
132153 fn contains( & self , owner: ThreadId ) -> bool {
133- // Synchronizes with the release-increment in `set()` to ensure
134- // we only read the data after it's been fully written.
135- let mut seq = self . seq. load( Acquire ) ;
136- loop {
137- if seq % 2 == 0 {
138- let low = self . low. load( Relaxed ) ;
139- let high = self . high. load( Relaxed ) ;
140- // The acquire-increment in `set()` synchronizes with this release
141- // store to ensure that `get()` doesn't see data from a subsequent
142- // `set()` call.
143- match self . seq. compare_exchange_weak( seq, seq, Release , Acquire ) {
144- Ok ( _) => {
145- let tid = u64 :: from( low) | ( u64 :: from( high) << 32 ) ;
146- return owner. as_u64( ) . get( ) == tid;
147- } ,
148- Err ( new) => seq = new,
149- }
150- } else {
151- // Another thread is currently writing to the seqlock. That thread
152- // must also be holding the mutex, so we can't currently be the lock owner.
153- return false ;
154- }
155- }
154+ // SAFETY: See the comments in the struct definition.
155+ self . tls_addr. load( Ordering :: Relaxed ) == tls_addr( )
156+ && unsafe { * self . tid. get( ) } == owner. as_u64( ) . get( )
156157 }
157158
158159 #[ inline]
159- // This may only be called from one thread at a time, otherwise
160- // concurrent `get()` calls may return teared data.
160+ // This may only be called by one thread at a time.
161161 fn set( & self , tid: Option <ThreadId >) {
162+ // It's important that we set `self.tls_addr` to 0 if the
163+ // tid is cleared. Otherwise, there might be race conditions between
164+ // `set()` and `get()`.
165+ let tls_addr = if tid. is_some( ) { tls_addr( ) } else { 0 } ;
162166 let value = tid. map_or( 0 , |tid| tid. as_u64( ) . get( ) ) ;
163- self . seq. fetch_add( 1 , Acquire ) ;
164- self . low. store( value as u32 , Relaxed ) ;
165- self . high. store( ( value >> 32 ) as u32 , Relaxed ) ;
166- self . seq. fetch_add( 1 , Release ) ;
167+ self . tls_addr. store( tls_addr, Ordering :: Relaxed ) ;
168+ unsafe { * self . tid. get( ) = value } ;
167169 }
168170 }
169- } else {
170- compile_error!( "`ReentrantLock` requires at least 32 bit atomics!" ) ;
171171 }
172172) ;
173173
@@ -272,7 +272,7 @@ impl<T: ?Sized> ReentrantLock<T> {
272272 /// assert_eq!(lock.lock().get(), 10);
273273 /// ```
274274 pub fn lock ( & self ) -> ReentrantLockGuard < ' _ , T > {
275- let this_thread = current_thread_id ( ) ;
275+ let this_thread = current_id ( ) ;
276276 // Safety: We only touch lock_count when we own the lock.
277277 unsafe {
278278 if self . owner . contains ( this_thread) {
@@ -314,7 +314,7 @@ impl<T: ?Sized> ReentrantLock<T> {
314314 ///
315315 /// This function does not block.
316316 pub ( crate ) fn try_lock ( & self ) -> Option < ReentrantLockGuard < ' _ , T > > {
317- let this_thread = current_thread_id ( ) ;
317+ let this_thread = current_id ( ) ;
318318 // Safety: We only touch lock_count when we own the lock.
319319 unsafe {
320320 if self . owner . contains ( this_thread) {
@@ -400,17 +400,3 @@ impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
400400 }
401401 }
402402}
403-
404- /// Returns the current thread's ThreadId value, which is guaranteed
405- /// to be unique across the lifetime of the process.
406- ///
407- /// Panics if called during a TLS destructor on a thread that hasn't
408- /// been assigned an ID.
409- pub ( crate ) fn current_thread_id ( ) -> ThreadId {
410- #[ cold]
411- fn no_tid ( ) -> ! {
412- rtabort ! ( "Thread hasn't been assigned an ID!" )
413- }
414-
415- crate :: thread:: try_current_id ( ) . unwrap_or_else ( || no_tid ( ) )
416- }
0 commit comments