@@ -22,7 +22,6 @@ use std::borrow;
 use std::comm;
 use std::comm::SendDeferred;
 use std::comm::{GenericPort, Peekable};
-use std::task;
 use std::unstable::sync::{Exclusive, UnsafeArc};
 use std::unstable::atomics;
 use std::unstable::finally::Finally;
@@ -134,13 +133,11 @@ impl<Q:Send> Sem<Q> {
     }

     pub fn access<U>(&self, blk: || -> U) -> U {
-        do task::unkillable {
-            do (|| {
-                self.acquire();
-                do task::rekillable { blk() }
-            }).finally {
-                self.release();
-            }
+        do (|| {
+            self.acquire();
+            blk()
+        }).finally {
+            self.release();
         }
     }
 }
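The hunk above keeps the `(|| { ... }).finally { ... }` bracketing, so `release()` still runs even if `blk` fails; only the kill-protection wrappers go away. As a minimal sketch (present-day Rust, with a stand-in `Sem` type rather than this module's), the same acquire/run/always-release discipline can be written with a drop guard playing the role of `.finally`:

use std::sync::{Condvar, Mutex};

// Stand-in counting semaphore, not the Sem defined in this module.
struct Sem {
    count: Mutex<isize>,
    cond: Condvar,
}

impl Sem {
    fn new(count: isize) -> Sem {
        Sem { count: Mutex::new(count), cond: Condvar::new() }
    }
    fn acquire(&self) {
        let mut n = self.count.lock().unwrap();
        while *n <= 0 {
            n = self.cond.wait(n).unwrap();
        }
        *n -= 1;
    }
    fn release(&self) {
        *self.count.lock().unwrap() += 1;
        self.cond.notify_one();
    }
    // Counterpart of `access`: the guard's Drop is the `.finally`,
    // so the semaphore is released even if `blk` panics.
    fn access<U>(&self, blk: impl FnOnce() -> U) -> U {
        self.acquire();
        struct Guard<'a>(&'a Sem);
        impl<'a> Drop for Guard<'a> {
            fn drop(&mut self) {
                self.0.release();
            }
        }
        let _guard = Guard(self);
        blk()
    }
}

fn main() {
    let sem = Sem::new(1);
    assert_eq!(sem.access(|| 42), 42);
}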
@@ -206,48 +203,41 @@ impl<'self> Condvar<'self> {
     pub fn wait_on(&self, condvar_id: uint) {
         let mut WaitEnd = None;
         let mut out_of_bounds = None;
-        do task::unkillable {
-            // Release lock, 'atomically' enqueuing ourselves in so doing.
-            unsafe {
-                do (**self.sem).with |state| {
-                    if condvar_id < state.blocked.len() {
-                        // Drop the lock.
-                        state.count += 1;
-                        if state.count <= 0 {
-                            state.waiters.signal();
-                        }
-                        // Create waiter nobe, and enqueue ourself to
-                        // be woken up by a signaller.
-                        WaitEnd = Some(state.blocked[condvar_id].wait_end());
-                    } else {
-                        out_of_bounds = Some(state.blocked.len());
+        // Release lock, 'atomically' enqueuing ourselves in so doing.
+        unsafe {
+            do (**self.sem).with |state| {
+                if condvar_id < state.blocked.len() {
+                    // Drop the lock.
+                    state.count += 1;
+                    if state.count <= 0 {
+                        state.waiters.signal();
                     }
+                    // Create waiter nobe, and enqueue ourself to
+                    // be woken up by a signaller.
+                    WaitEnd = Some(state.blocked[condvar_id].wait_end());
+                } else {
+                    out_of_bounds = Some(state.blocked.len());
                 }
             }
+        }

-            // If deschedule checks start getting inserted anywhere, we can be
-            // killed before or after enqueueing. Deciding whether to
-            // unkillably reacquire the lock needs to happen atomically
-            // wrt enqueuing.
-            do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") {
-                // Unconditionally "block". (Might not actually block if a
-                // signaller already sent -- I mean 'unconditionally' in contrast
-                // with acquire().)
-                do (|| {
-                    do task::rekillable {
-                        let _ = WaitEnd.take_unwrap().recv();
-                    }
-                }).finally {
-                    // Reacquire the condvar. Note this is back in the unkillable
-                    // section; it needs to succeed, instead of itself dying.
-                    match self.order {
-                        Just(lock) => do lock.access {
-                            self.sem.acquire();
-                        },
-                        Nothing => {
-                            self.sem.acquire();
-                        },
-                    }
+        // If deschedule checks start getting inserted anywhere, we can be
+        // killed before or after enqueueing.
+        do check_cvar_bounds(out_of_bounds, condvar_id, "cond.wait_on()") {
+            // Unconditionally "block". (Might not actually block if a
+            // signaller already sent -- I mean 'unconditionally' in contrast
+            // with acquire().)
+            do (|| {
+                let _ = WaitEnd.take_unwrap().recv();
+            }).finally {
+                // Reacquire the condvar.
+                match self.order {
+                    Just(lock) => do lock.access {
+                        self.sem.acquire();
+                    },
+                    Nothing => {
+                        self.sem.acquire();
+                    },
                 }
             }
         }
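What survives the rewrite is the wait protocol itself: enqueue a one-shot wake-up endpoint while the semaphore's state is held, release, block on that endpoint, and reacquire in the cleanup path. A minimal sketch of the enqueue-then-signal handshake (current Rust, with illustrative stand-ins for the module's wait queue and `wait_end`):

use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Mutex;

// Stand-in for one of the per-condvar wait queues in `state.blocked`.
struct WaitQueue {
    waiters: Mutex<Vec<Sender<()>>>,
}

impl WaitQueue {
    fn new() -> WaitQueue {
        WaitQueue { waiters: Mutex::new(Vec::new()) }
    }

    // Called while the lock's state is conceptually still held; returns the
    // receiving end the waiter blocks on after letting the lock go.
    fn enqueue(&self) -> Receiver<()> {
        let (tx, rx) = channel();
        self.waiters.lock().unwrap().push(tx);
        rx
    }

    // A signaller wakes one queued waiter, if any.
    fn signal(&self) -> bool {
        match self.waiters.lock().unwrap().pop() {
            Some(tx) => tx.send(()).is_ok(),
            None => false,
        }
    }
}

fn main() {
    let q = WaitQueue::new();
    let wait_end = q.enqueue();  // enqueue before "releasing the lock"
    assert!(q.signal());         // a signaller pops the waiter off
    wait_end.recv().unwrap();    // the waiter unblocks
}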
@@ -484,30 +474,28 @@ impl RWLock {
      */
     pub fn read<U>(&self, blk: || -> U) -> U {
         unsafe {
-            do task::unkillable {
-                do (&self.order_lock).access {
-                    let state = &mut *self.state.get();
-                    let old_count = state.read_count.fetch_add(1, atomics::Acquire);
-                    if old_count == 0 {
-                        (&self.access_lock).acquire();
-                        state.read_mode = true;
-                    }
+            do (&self.order_lock).access {
+                let state = &mut *self.state.get();
+                let old_count = state.read_count.fetch_add(1, atomics::Acquire);
+                if old_count == 0 {
+                    (&self.access_lock).acquire();
+                    state.read_mode = true;
                 }
-                do (|| {
-                    do task::rekillable { blk() }
-                }).finally {
-                    let state = &mut *self.state.get();
-                    assert!(state.read_mode);
-                    let old_count = state.read_count.fetch_sub(1, atomics::Release);
-                    assert!(old_count > 0);
-                    if old_count == 1 {
-                        state.read_mode = false;
-                        // Note: this release used to be outside of a locked access
-                        // to exclusive-protected state. If this code is ever
-                        // converted back to such (instead of using atomic ops),
-                        // this access MUST NOT go inside the exclusive access.
-                        (&self.access_lock).release();
-                    }
+            }
+            do (|| {
+                blk()
+            }).finally {
+                let state = &mut *self.state.get();
+                assert!(state.read_mode);
+                let old_count = state.read_count.fetch_sub(1, atomics::Release);
+                assert!(old_count > 0);
+                if old_count == 1 {
+                    state.read_mode = false;
+                    // Note: this release used to be outside of a locked access
+                    // to exclusive-protected state. If this code is ever
+                    // converted back to such (instead of using atomic ops),
+                    // this access MUST NOT go inside the exclusive access.
+                    (&self.access_lock).release();
                 }
             }
         }
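The reader-count protocol is unchanged here: the reader that moves the count from 0 to 1 acquires `access_lock` on behalf of the whole group, and the one that moves it back to 0 releases it. A small sketch of just those two transitions (current Rust; the helper names are hypothetical):

use std::sync::atomic::{AtomicUsize, Ordering};

// Returns true if this reader is the first one in and must therefore
// acquire the writer-exclusion lock for the whole reader group.
fn enter_read(read_count: &AtomicUsize) -> bool {
    read_count.fetch_add(1, Ordering::Acquire) == 0
}

// Returns true if this reader is the last one out and must therefore
// release the writer-exclusion lock.
fn exit_read(read_count: &AtomicUsize) -> bool {
    let old = read_count.fetch_sub(1, Ordering::Release);
    assert!(old > 0);
    old == 1
}

fn main() {
    let count = AtomicUsize::new(0);
    assert!(enter_read(&count));   // first reader: would acquire access_lock
    assert!(!enter_read(&count));  // second reader: piggybacks
    assert!(!exit_read(&count));   // not the last one out yet
    assert!(exit_read(&count));    // last reader: would release access_lock
}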
@@ -518,14 +506,10 @@ impl RWLock {
      * 'write' from other tasks will run concurrently with this one.
      */
     pub fn write<U>(&self, blk: || -> U) -> U {
-        do task::unkillable {
-            (&self.order_lock).acquire();
-            do (&self.access_lock).access {
-                (&self.order_lock).release();
-                do task::rekillable {
-                    blk()
-                }
-            }
+        (&self.order_lock).acquire();
+        do (&self.access_lock).access {
+            (&self.order_lock).release();
+            blk()
         }
     }

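The two-lock handshake is also retained: a writer queues on `order_lock`, takes `access_lock` while still holding it, then drops `order_lock` so the next task can queue up behind it. A minimal sketch of that ordering with two plain mutexes as stand-ins (current Rust, not this module's semaphore-based locks):

use std::sync::Mutex;

struct TwoStage {
    order_lock: Mutex<()>,
    access_lock: Mutex<()>,
}

impl TwoStage {
    fn write<U>(&self, blk: impl FnOnce() -> U) -> U {
        let order = self.order_lock.lock().unwrap();    // take a ticket
        let _access = self.access_lock.lock().unwrap(); // exclude readers and writers
        drop(order); // let the next task queue while we hold access
        blk()
    }
}

fn main() {
    let lock = TwoStage {
        order_lock: Mutex::new(()),
        access_lock: Mutex::new(()),
    };
    assert_eq!(lock.write(|| 7), 7);
}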
@@ -562,16 +546,12 @@ impl RWLock {
         // which can't happen until T2 finishes the downgrade-read entirely.
         // The astute reader will also note that making waking writers use the
         // order_lock is better for not starving readers.
-        do task::unkillable {
-            (&self.order_lock).acquire();
-            do (&self.access_lock).access_cond |cond| {
-                (&self.order_lock).release();
-                do task::rekillable {
-                    let opt_lock = Just(&self.order_lock);
-                    blk(&Condvar { sem: cond.sem, order: opt_lock,
-                                   token: NonCopyable })
-                }
-            }
+        (&self.order_lock).acquire();
+        do (&self.access_lock).access_cond |cond| {
+            (&self.order_lock).release();
+            let opt_lock = Just(&self.order_lock);
+            blk(&Condvar { sem: cond.sem, order: opt_lock,
+                           token: NonCopyable })
         }
     }

@@ -599,39 +579,35 @@ impl RWLock {
     pub fn write_downgrade<U>(&self, blk: |v: RWLockWriteMode| -> U) -> U {
         // Implementation slightly different from the slicker 'write's above.
         // The exit path is conditional on whether the caller downgrades.
-        do task::unkillable {
-            (&self.order_lock).acquire();
-            (&self.access_lock).acquire();
-            (&self.order_lock).release();
-            do (|| {
-                do task::rekillable {
-                    blk(RWLockWriteMode { lock: self, token: NonCopyable })
-                }
-            }).finally {
-                let writer_or_last_reader;
-                // Check if we're releasing from read mode or from write mode.
-                let state = unsafe { &mut *self.state.get() };
-                if state.read_mode {
-                    // Releasing from read mode.
-                    let old_count = state.read_count.fetch_sub(1, atomics::Release);
-                    assert!(old_count > 0);
-                    // Check if other readers remain.
-                    if old_count == 1 {
-                        // Case 1: Writer downgraded & was the last reader
-                        writer_or_last_reader = true;
-                        state.read_mode = false;
-                    } else {
-                        // Case 2: Writer downgraded & was not the last reader
-                        writer_or_last_reader = false;
-                    }
-                } else {
-                    // Case 3: Writer did not downgrade
+        (&self.order_lock).acquire();
+        (&self.access_lock).acquire();
+        (&self.order_lock).release();
+        do (|| {
+            blk(RWLockWriteMode { lock: self, token: NonCopyable })
+        }).finally {
+            let writer_or_last_reader;
+            // Check if we're releasing from read mode or from write mode.
+            let state = unsafe { &mut *self.state.get() };
+            if state.read_mode {
+                // Releasing from read mode.
+                let old_count = state.read_count.fetch_sub(1, atomics::Release);
+                assert!(old_count > 0);
+                // Check if other readers remain.
+                if old_count == 1 {
+                    // Case 1: Writer downgraded & was the last reader
                     writer_or_last_reader = true;
+                    state.read_mode = false;
+                } else {
+                    // Case 2: Writer downgraded & was not the last reader
+                    writer_or_last_reader = false;
                 }
-                if writer_or_last_reader {
-                    // Nobody left inside; release the "reader cloud" lock.
-                    (&self.access_lock).release();
-                }
+            } else {
+                // Case 3: Writer did not downgrade
+                writer_or_last_reader = true;
+            }
+            if writer_or_last_reader {
+                // Nobody left inside; release the "reader cloud" lock.
+                (&self.access_lock).release();
             }
         }
     }
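The `.finally` block above decides, after `blk` returns or fails, whether this task still owns the access lock: it does if it never downgraded (Case 3) or if it downgraded and was the last reader out (Case 1). A small sketch of just that decision (current Rust; the helper is hypothetical):

// `old_read_count` is the value fetch_sub returned, i.e. the count before
// this task decremented it on the way out.
fn must_release_access_lock(downgraded_to_read: bool, old_read_count: usize) -> bool {
    if downgraded_to_read {
        assert!(old_read_count > 0);
        old_read_count == 1 // Case 1 (last reader) vs. Case 2 (readers remain)
    } else {
        true // Case 3: never downgraded, still holds exclusive access
    }
}

fn main() {
    assert!(must_release_access_lock(true, 1));  // downgraded, last reader out
    assert!(!must_release_access_lock(true, 3)); // downgraded, readers remain
    assert!(must_release_access_lock(false, 0)); // never downgraded
}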
@@ -643,23 +619,21 @@ impl RWLock {
             fail!("Can't downgrade() with a different rwlock's write_mode!");
         }
         unsafe {
-            do task::unkillable {
-                let state = &mut *self.state.get();
-                assert!(!state.read_mode);
-                state.read_mode = true;
-                // If a reader attempts to enter at this point, both the
-                // downgrader and reader will set the mode flag. This is fine.
-                let old_count = state.read_count.fetch_add(1, atomics::Release);
-                // If another reader was already blocking, we need to hand-off
-                // the "reader cloud" access lock to them.
-                if old_count != 0 {
-                    // Guaranteed not to let another writer in, because
-                    // another reader was holding the order_lock. Hence they
-                    // must be the one to get the access_lock (because all
-                    // access_locks are acquired with order_lock held). See
-                    // the comment in write_cond for more justification.
-                    (&self.access_lock).release();
-                }
+            let state = &mut *self.state.get();
+            assert!(!state.read_mode);
+            state.read_mode = true;
+            // If a reader attempts to enter at this point, both the
+            // downgrader and reader will set the mode flag. This is fine.
+            let old_count = state.read_count.fetch_add(1, atomics::Release);
+            // If another reader was already blocking, we need to hand-off
+            // the "reader cloud" access lock to them.
+            if old_count != 0 {
+                // Guaranteed not to let another writer in, because
+                // another reader was holding the order_lock. Hence they
+                // must be the one to get the access_lock (because all
+                // access_locks are acquired with order_lock held). See
+                // the comment in write_cond for more justification.
+                (&self.access_lock).release();
             }
         }
         RWLockReadMode { lock: token.lock, token: NonCopyable }
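The hand-off rule in `downgrade` is likewise untouched: after flipping to read mode and joining the reader count, the downgrader releases `access_lock` only if another reader was already counted, since that reader is blocked waiting to take it over. A minimal sketch of the decision (current Rust; the helper is hypothetical):

use std::sync::atomic::{AtomicUsize, Ordering};

// Returns true if the downgrader must hand the access lock to an
// already-waiting reader rather than keep it for itself.
fn downgrade_handoff(read_count: &AtomicUsize) -> bool {
    // The downgrader joins the reader group...
    let old_count = read_count.fetch_add(1, Ordering::Release);
    // ...and hands off only if some reader was already counted (and blocked).
    old_count != 0
}

fn main() {
    let lone = AtomicUsize::new(0);
    assert!(!downgrade_handoff(&lone));      // no reader waiting: keep the lock

    let contended = AtomicUsize::new(2);
    assert!(downgrade_handoff(&contended));  // a blocked reader takes over
}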