diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs
index 6e4e2ac0feee3..ad07a98ed6125 100644
--- a/src/libgreen/lib.rs
+++ b/src/libgreen/lib.rs
@@ -395,6 +395,7 @@ impl SchedPool {
     ///
     /// This will configure the pool according to the `config` parameter, and
     /// initially run `main` inside the pool of schedulers.
+    #[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
     pub fn new(config: PoolConfig) -> SchedPool {
         static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
 
diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs
index af53766617cfe..7ab18b62bfdd4 100644
--- a/src/libgreen/stack.rs
+++ b/src/libgreen/stack.rs
@@ -157,6 +157,23 @@ impl StackPool {
     }
 }
 
+#[cfg(not(stage0))]
+fn max_cached_stacks() -> uint {
+    static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+    match AMT.load(atomics::SeqCst) {
+        0 => {}
+        n => return n - 1,
+    }
+    let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
+    // This default corresponds to 20M of cache per scheduler (at the
+    // default size).
+    let amt = amt.unwrap_or(10);
+    // 0 is our sentinel value, so ensure that we'll never see 0 after
+    // initialization has run
+    AMT.store(amt + 1, atomics::SeqCst);
+    return amt;
+}
+#[cfg(stage0)]
 fn max_cached_stacks() -> uint {
     static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
     match unsafe { AMT.load(atomics::SeqCst) } {
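The non-stage0 `max_cached_stacks` above (and the matching `min_stack` change in `src/libstd/rt/util.rs` further down) use the same lazily-initialized cache: the atomic starts at 0 (its static initializer), the computed value is stored as `value + 1`, and readers subtract 1 on the way out, so 0 can serve as the "not yet initialized" sentinel. A minimal standalone sketch of that pattern, assuming the post-stage0 rules where atomic operations on a `Share` static mut no longer need an `unsafe` block; the names and the environment variable are illustrative, not part of the patch:

```rust
use std::os::getenv;
use std::sync::atomics;

// 0 (the static initializer) means "not computed yet"; anything else is the
// cached value plus one.
static mut CACHED: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;

fn cached_env_uint() -> uint {
    match CACHED.load(atomics::SeqCst) {
        0 => {}             // not initialized yet, compute below
        n => return n - 1,  // undo the +1 encoding
    }
    // Racing threads may all reach this point, but they compute the same
    // value, so the duplicated stores are harmless.
    let val = getenv("EXAMPLE_LIMIT").and_then(|s| from_str(s.as_slice()));
    let val = val.unwrap_or(8);
    CACHED.store(val + 1, atomics::SeqCst);
    val
}
```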
diff --git a/src/liblog/lib.rs b/src/liblog/lib.rs
index 554f27b881b72..a878fc0eccb26 100644
--- a/src/liblog/lib.rs
+++ b/src/liblog/lib.rs
@@ -124,6 +124,7 @@ use std::mem;
 use std::os;
 use std::rt;
 use std::slice;
+use std::sync::atomics;
 use std::sync::{Once, ONCE_INIT};
 
 use directive::LOG_LEVEL_NAMES;
@@ -141,7 +142,7 @@ static DEFAULT_LOG_LEVEL: u32 = 1;
 /// An unsafe constant that is the maximum logging level of any module
 /// specified. This is the first line of defense to determining whether a
 /// logging statement should be run.
-static mut LOG_LEVEL: u32 = MAX_LOG_LEVEL;
+static mut LOG_LEVEL: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
 
 static mut DIRECTIVES: *const Vec<directive::LogDirective> =
     0 as *const Vec<directive::LogDirective>;
@@ -242,7 +243,29 @@ pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
 /// safely
 #[doc(hidden)]
 #[inline(always)]
-pub fn log_level() -> u32 { unsafe { LOG_LEVEL } }
+#[cfg(not(stage0))]
+pub fn log_level() -> u32 {
+    // Querying the log level needs to be as fast as possible, so we want to
+    // use as relaxed an ordering as possible. Once the log level has been
+    // initialized once, it will never change. The default log level also
+    // indicates that we *must* log everything (or at least attempt to run
+    // initialization).
+    //
+    // For this reason, we do a relaxed load here. It will either read the
+    // initial value of LOG_LEVEL, in which case the more expensive check will
+    // be run, or it will read the value stored during initialization, which is
+    // the correct value.
+    //
+    // Also note that the log level stored is the real log level plus 1 so the
+    // static initialization of 0 indicates that "everything must be logged"
+    LOG_LEVEL.load(atomics::Relaxed) as u32 - 1
+}
+
+/// dox
+#[doc(hidden)]
+#[inline(always)]
+#[cfg(stage0)]
+pub fn log_level() -> u32 { unsafe { LOG_LEVEL.load(atomics::Relaxed) as u32 - 1 } }
 
 /// Replaces the task-local logger with the specified logger, returning the old
 /// logger.
@@ -282,6 +305,26 @@ pub struct LogLocation {
 /// logging. This is the second layer of defense about determining whether a
 /// module's log statement should be emitted or not.
 #[doc(hidden)]
+#[cfg(not(stage0))]
+pub fn mod_enabled(level: u32, module: &str) -> bool {
+    static mut INIT: Once = ONCE_INIT;
+    INIT.doit(init);
+
+    // It's possible for many threads to be in this function, but only one of
+    // them will perform the global initialization. All of them will need to
+    // check again whether they should really be here or not. Hence, despite
+    // this check being expanded manually in the logging macro, this function
+    // checks the log level again.
+    if level > log_level() { return false }
+
+    // This assertion should never get tripped unless we're in an at_exit
+    // handler after logging has been torn down and a logging attempt was made.
+    assert!(!DIRECTIVES.is_null());
+
+    enabled(level, module, unsafe { (*DIRECTIVES).iter() })
+}
+/// dox
+#[cfg(stage0)]
 pub fn mod_enabled(level: u32, module: &str) -> bool {
     static mut INIT: Once = ONCE_INIT;
     unsafe { INIT.doit(init); }
@@ -291,7 +334,7 @@ pub fn mod_enabled(level: u32, module: &str) -> bool {
     // again to whether they should really be here or not. Hence, despite this
     // check being expanded manually in the logging macro, this function checks
     // the log level again.
-    if level > unsafe { LOG_LEVEL } { return false }
+    if level > log_level() { return false }
 
     // This assertion should never get tripped unless we're in an at_exit
     // handler after logging has been torn down and a logging attempt was made.
@@ -320,6 +363,7 @@ fn enabled(level: u32,
 ///
 /// This is not threadsafe at all, so initialization os performed through a
 /// `Once` primitive (and this function is called from that primitive).
+#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
 fn init() {
     let mut directives = match os::getenv("RUST_LOG") {
         Some(spec) => directive::parse_logging_spec(spec.as_slice()),
@@ -340,8 +384,10 @@ fn init() {
     };
 
     unsafe {
-        LOG_LEVEL = max_level;
+        LOG_LEVEL.store(max_level as uint + 1, atomics::SeqCst);
+    }
 
+    unsafe {
         assert!(DIRECTIVES.is_null());
         DIRECTIVES = mem::transmute(box directives);
 
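The comment added to `log_level` above is the crux of this hunk: `init()` performs a single `SeqCst` store, so the only values a `Relaxed` load on the hot path can ever observe are the static initializer (0) and the final level-plus-one. Since the subtraction wraps, 0 decodes to `u32::MAX`, meaning "everything is potentially enabled, fall through to the full check". A small sketch of just that encoding, with illustrative names and assuming the post-stage0 unsafety rules:

```rust
use std::sync::atomics;

// Stored value is (real maximum log level + 1); 0 means init has not run yet.
static mut MAX_LEVEL: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;

fn set_max_level(level: u32) {
    // Written exactly once, from the Once-guarded initialization path.
    MAX_LEVEL.store(level as uint + 1, atomics::SeqCst);
}

#[inline(always)]
fn fast_max_level() -> u32 {
    // A relaxed load sees either 0 (which wraps to u32::MAX here, so every
    // statement falls through to the slower mod_enabled check) or the value
    // stored by set_max_level.
    MAX_LEVEL.load(atomics::Relaxed) as u32 - 1
}
```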
diff --git a/src/libnative/io/process.rs b/src/libnative/io/process.rs
index c89a40d651351..814ce0928779e 100644
--- a/src/libnative/io/process.rs
+++ b/src/libnative/io/process.rs
@@ -932,6 +932,7 @@ fn waitpid(pid: pid_t, deadline: u64) -> IoResult<rtio::ProcessExit> {
 }
 
 #[cfg(unix)]
+#[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
 fn waitpid(pid: pid_t, deadline: u64) -> IoResult<rtio::ProcessExit> {
     use std::cmp;
     use std::comm;
diff --git a/src/libnative/io/timer_unix.rs b/src/libnative/io/timer_unix.rs
index 87c320e0457cb..d0a707f1d4221 100644
--- a/src/libnative/io/timer_unix.rs
+++ b/src/libnative/io/timer_unix.rs
@@ -202,6 +202,24 @@ fn helper(input: libc::c_int, messages: Receiver<Req>, _: ()) {
 }
 
 impl Timer {
+    #[cfg(not(stage0))]
+    pub fn new() -> IoResult<Timer> {
+        HELPER.boot(|| {}, helper);
+
+        static mut ID: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+        let id = ID.fetch_add(1, atomics::Relaxed);
+        Ok(Timer {
+            id: id,
+            inner: Some(box Inner {
+                cb: None,
+                interval: 0,
+                target: 0,
+                repeat: false,
+                id: id,
+            })
+        })
+    }
+    #[cfg(stage0)]
     pub fn new() -> IoResult<Timer> {
         // See notes above regarding using int return value
         // instead of ()
@@ -233,6 +251,18 @@ impl Timer {
         }
     }
 
+    #[cfg(not(stage0))]
+    fn inner(&mut self) -> Box<Inner> {
+        match self.inner.take() {
+            Some(i) => i,
+            None => {
+                let (tx, rx) = channel();
+                HELPER.send(RemoveTimer(self.id, tx));
+                rx.recv()
+            }
+        }
+    }
+    #[cfg(stage0)]
     fn inner(&mut self) -> Box<Inner> {
         match self.inner.take() {
             Some(i) => i,
@@ -254,6 +284,7 @@ impl rtio::RtioTimer for Timer {
         Timer::sleep(msecs);
     }
 
+    #[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
     fn oneshot(&mut self, msecs: u64, cb: Box<rtio::Callback + Send>) {
         let now = now();
         let mut inner = self.inner();
@@ -266,6 +297,7 @@ impl rtio::RtioTimer for Timer {
         unsafe { HELPER.send(NewTimer(inner)); }
     }
 
+    #[allow(unused_unsafe)] // NOTE: remove after a stage0 snap
     fn period(&mut self, msecs: u64, cb: Box<rtio::Callback + Send>) {
         let now = now();
         let mut inner = self.inner();
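The new `Timer::new` above allocates IDs with a `Relaxed` `fetch_add`: the read-modify-write is still atomic, so no two timers can observe the same value, and the IDs carry no ordering meaning beyond uniqueness, so no stronger ordering is needed. A tiny sketch of that idea in isolation (names are illustrative, post-stage0 rules assumed):

```rust
use std::sync::atomics;

static mut NEXT_ID: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;

// Every caller gets a distinct value, even under contention; Relaxed is
// sufficient because callers only care that the values never repeat.
fn next_id() -> uint {
    NEXT_ID.fetch_add(1, atomics::Relaxed)
}
```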
diff --git a/src/librustc/middle/effect.rs b/src/librustc/middle/effect.rs
index e6160038b1d8c..03466c49d13a6 100644
--- a/src/librustc/middle/effect.rs
+++ b/src/librustc/middle/effect.rs
@@ -15,6 +15,9 @@ use middle::def;
 use middle::ty;
 use middle::typeck::MethodCall;
 use util::ppaux;
+use util::nodemap::NodeSet;
+use euv = middle::expr_use_visitor;
+use mc = middle::mem_categorization;
 
 use syntax::ast;
 use syntax::ast_util::PostExpansionMethod;
@@ -40,10 +43,18 @@ fn type_is_unsafe_function(ty: ty::t) -> bool {
 struct EffectCheckVisitor<'a> {
     tcx: &'a ty::ctxt,
 
+    mutably_accessed_statics: &'a mut NodeSet,
+
     /// Whether we're in an unsafe context.
     unsafe_context: UnsafeContext,
 }
 
+struct FunctionVisitor<'a, 'b>(euv::ExprUseVisitor<'a, 'b, ty::ctxt>);
+
+struct StaticMutChecker<'a> {
+    mutably_accessed_statics: NodeSet,
+}
+
 impl<'a> EffectCheckVisitor<'a> {
     fn require_unsafe(&mut self, span: Span, description: &str) {
         match self.unsafe_context {
@@ -142,6 +153,10 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
     }
 
     fn visit_expr(&mut self, expr: &ast::Expr, _:()) {
+        if self.mutably_accessed_statics.remove(&expr.id) {
+            self.require_unsafe(expr.span, "mutable use of static")
+        }
+
         match expr.node {
             ast::ExprMethodCall(_, _, _) => {
                 let method_call = MethodCall::expr(expr.id);
@@ -185,7 +200,12 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
             ast::ExprPath(..) => {
                 match ty::resolve_expr(self.tcx, expr) {
                     def::DefStatic(_, true) => {
-                        self.require_unsafe(expr.span, "use of mutable static")
+                        let ty = ty::node_id_to_type(self.tcx, expr.id);
+                        let contents = ty::type_contents(self.tcx, ty);
+                        if !contents.is_sharable(self.tcx) {
+                            self.require_unsafe(expr.span,
+                                                "use of non-Share static mut")
+                        }
                     }
                     _ => {}
                 }
@@ -197,11 +217,98 @@ impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
     }
 }
 
+impl<'a, 'b> Visitor<()> for FunctionVisitor<'a, 'b> {
+    fn visit_fn(&mut self, fk: &visit::FnKind, fd: &ast::FnDecl,
+                b: &ast::Block, s: Span, _: ast::NodeId, _: ()) {
+        {
+            let FunctionVisitor(ref mut inner) = *self;
+            inner.walk_fn(fd, b);
+        }
+        visit::walk_fn(self, fk, fd, b, s, ());
+    }
+}
+
+impl<'a> StaticMutChecker<'a> {
+    fn is_static_mut(&self, mut cur: &mc::cmt) -> bool {
+        loop {
+            match cur.cat {
+                mc::cat_static_item => {
+                    return match cur.mutbl {
+                        mc::McImmutable => return false,
+                        _ => true
+                    }
+                }
+                mc::cat_deref(ref cmt, _, _) |
+                mc::cat_discr(ref cmt, _) |
+                mc::cat_downcast(ref cmt) |
+                mc::cat_interior(ref cmt, _) => cur = cmt,
+
+                mc::cat_rvalue(..) |
+                mc::cat_copied_upvar(..) |
+                mc::cat_upvar(..) |
+                mc::cat_local(..) |
+                mc::cat_arg(..) => return false
+            }
+        }
+    }
+}
+
+impl<'a> euv::Delegate for StaticMutChecker<'a> {
+    fn borrow(&mut self,
+              borrow_id: ast::NodeId,
+              _borrow_span: Span,
+              cmt: mc::cmt,
+              _loan_region: ty::Region,
+              bk: ty::BorrowKind,
+              _loan_cause: euv::LoanCause) {
+        if !self.is_static_mut(&cmt) {
+            return
+        }
+        match bk {
+            ty::ImmBorrow => {}
+            ty::UniqueImmBorrow | ty::MutBorrow => {
+                self.mutably_accessed_statics.insert(borrow_id);
+            }
+        }
+    }
+
+    fn mutate(&mut self,
+              assignment_id: ast::NodeId,
+              _assignment_span: Span,
+              assignee_cmt: mc::cmt,
+              _mode: euv::MutateMode) {
+        if !self.is_static_mut(&assignee_cmt) {
+            return
+        }
+        self.mutably_accessed_statics.insert(assignment_id);
+    }
+
+    fn consume(&mut self,
+               _consume_id: ast::NodeId,
+               _consume_span: Span,
+               _cmt: mc::cmt,
+               _mode: euv::ConsumeMode) {}
+    fn consume_pat(&mut self,
+                   _consume_pat: &ast::Pat,
+                   _cmt: mc::cmt,
+                   _mode: euv::ConsumeMode) {}
+    fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) {}
+}
+
 pub fn check_crate(tcx: &ty::ctxt, krate: &ast::Crate) {
+    let mut delegate = StaticMutChecker {
+        mutably_accessed_statics: NodeSet::new(),
+    };
+    {
+        let visitor = euv::ExprUseVisitor::new(&mut delegate, tcx);
+        visit::walk_crate(&mut FunctionVisitor(visitor), krate, ());
+    }
+
     let mut visitor = EffectCheckVisitor {
         tcx: tcx,
         unsafe_context: SafeContext,
+        mutably_accessed_statics: &mut delegate.mutably_accessed_statics,
     };
-
     visit::walk_crate(&mut visitor, krate, ());
+    assert!(visitor.mutably_accessed_statics.len() == 0);
 }
diff --git a/src/librustrt/bookkeeping.rs b/src/librustrt/bookkeeping.rs
index dc96aecff8017..3eaeb7567ec9a 100644
--- a/src/librustrt/bookkeeping.rs
+++ b/src/librustrt/bookkeeping.rs
@@ -34,11 +34,18 @@ impl Drop for Token {
 
 /// Increment the number of live tasks, returning a token which will decrement
 /// the count when dropped.
+#[cfg(stage0)]
 pub fn increment() -> Token {
     let _ = unsafe { TASK_COUNT.fetch_add(1, atomics::SeqCst) };
     Token { _private: () }
 }
+#[cfg(not(stage0))]
+pub fn increment() -> Token {
+    let _ = TASK_COUNT.fetch_add(1, atomics::SeqCst);
+    Token { _private: () }
+}
 
+#[cfg(stage0)]
 pub fn decrement() {
     unsafe {
         if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
@@ -47,6 +54,15 @@ pub fn decrement() {
         }
     }
 }
+#[cfg(not(stage0))]
+pub fn decrement() {
+    if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
+        unsafe {
+            let guard = TASK_LOCK.lock();
+            guard.signal();
+        }
+    }
+}
 
 /// Waits for all other native tasks in the system to exit. This is only used by
 /// the entry points of native programs
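The stage0/non-stage0 split of `increment` above keeps the contract described in its doc comment: callers never invoke `decrement` directly, they just hold on to the returned `Token`, whose `Drop` impl (see the context at the top of this hunk) does the decrement and, for the last task, takes `TASK_LOCK` and signals the thread waiting for all tasks to exit. A hedged usage sketch; only `increment()` and the drop-based decrement come from the patch, the surrounding spawn glue is assumed:

```rust
// Hypothetical caller inside the native runtime's task-spawn path.
fn run_counted_task(work: proc()) {
    let _token = bookkeeping::increment(); // live-task count goes up by one
    work();
    // `_token` is dropped here, which calls decrement(); if this was the last
    // live task, the waiter blocked on TASK_LOCK gets signalled.
}
```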
diff --git a/src/librustrt/local_ptr.rs b/src/librustrt/local_ptr.rs
index c94e5c6187b3a..9b16d53c0e461 100644
--- a/src/librustrt/local_ptr.rs
+++ b/src/librustrt/local_ptr.rs
@@ -207,11 +207,18 @@ pub mod compiled {
 
     /// Check whether there is a thread-local pointer installed.
     #[inline(never)] // see comments above
+    #[cfg(stage0)]
     pub fn exists() -> bool {
         unsafe {
             RT_TLS_PTR.is_not_null()
         }
     }
+    /// Check whether there is a thread-local pointer installed.
+    #[inline(never)] // see comments above
+    #[cfg(not(stage0))]
+    pub fn exists() -> bool {
+        RT_TLS_PTR.is_not_null()
+    }
 
     #[inline(never)] // see comments above
     pub unsafe fn unsafe_borrow<T>() -> *mut T {
@@ -373,20 +380,17 @@ pub mod native {
     }
 
     #[inline]
-    #[cfg(not(test))]
+    #[cfg(not(test), stage0)]
     #[allow(visible_private_types)]
     pub fn maybe_tls_key() -> Option<tls::Key> {
         unsafe {
-            // NB: This is a little racy because, while the key is
-            // initialized under a mutex and it's assumed to be initialized
-            // in the Scheduler ctor by any thread that needs to use it,
-            // we are not accessing the key under a mutex.  Threads that
-            // are not using the new Scheduler but still *want to check*
-            // whether they are running under a new Scheduler may see a 0
-            // value here that is in the process of being initialized in
-            // another thread. I think this is fine since the only action
-            // they could take if it was initialized would be to check the
-            // thread-local value and see that it's not set.
+            // NB: This could lead to undefined behavior because it is an
+            // unsynchronized read of a global, but booting the runtime while
+            // simultaneously using it is already undefined behavior.
+            //
+            // This global is written immediately on program startup (before all
+            // usage of it), so all proper usage of the runtime will not invoke
+            // undefined behavior here.
             if RT_TLS_KEY != -1 {
                 return Some(RT_TLS_KEY);
             } else {
@@ -395,6 +399,24 @@ pub mod native {
         }
     }
 
+    #[inline]
+    #[cfg(not(test), not(stage0))]
+    #[allow(visible_private_types)]
+    pub fn maybe_tls_key() -> Option<tls::Key> {
+        // NB: This could lead to undefined behavior because it is an
+        // unsynchronized read of a global, but booting the runtime while
+        // simultaneously using it is already undefined behavior.
+        //
+        // This global is written immediately on program startup (before all
+        // usage of it), so all proper usage of the runtime will not invoke
+        // undefined behavior here.
+        if RT_TLS_KEY != -1 {
+            return Some(RT_TLS_KEY);
+        } else {
+            return None;
+        }
+    }
+
     #[inline] #[cfg(test)]
     pub fn maybe_tls_key() -> Option<tls::Key> {
         use realrustrt;
diff --git a/src/libstd/io/tempfile.rs b/src/libstd/io/tempfile.rs
index f580dfd80f0ce..d0cfe6719233d 100644
--- a/src/libstd/io/tempfile.rs
+++ b/src/libstd/io/tempfile.rs
@@ -37,6 +37,30 @@ impl TempDir {
     /// deleted once the returned wrapper is destroyed.
     ///
     /// If no directory can be created, None is returned.
+    #[cfg(not(stage0))]
+    pub fn new_in(tmpdir: &Path, suffix: &str) -> Option<TempDir> {
+        if !tmpdir.is_absolute() {
+            return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
+        }
+
+        static mut CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+
+        for _ in range(0u, 1000) {
+            let filename =
+                format!("rs-{}-{}-{}",
+                        unsafe { libc::getpid() },
+                        CNT.fetch_add(1, atomics::SeqCst),
+                        suffix);
+            let p = tmpdir.join(filename);
+            match fs::mkdir(&p, io::UserRWX) {
+                Err(..) => {}
+                Ok(()) => return Some(TempDir { path: Some(p), disarmed: false })
+            }
+        }
+        None
+    }
+    /// dox
+    #[cfg(stage0)]
     pub fn new_in(tmpdir: &Path, suffix: &str) -> Option<TempDir> {
         if !tmpdir.is_absolute() {
             return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
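One thing the non-stage0 `new_in` above relies on but does not spell out: the pid/counter name is only a good guess at uniqueness, and the real arbiter is `fs::mkdir`, which fails if the path already exists, so a collision simply advances the loop to the next candidate (giving up after 1000 tries and returning `None`, as the doc comment says). A compressed sketch of that create-to-claim loop, using only calls that appear in the patch; the helper name is illustrative:

```rust
extern crate libc;

use std::io;
use std::io::fs;
use std::sync::atomics;

static mut CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;

// Returns the first directory we manage to create, or None if every candidate
// name was taken (or mkdir kept failing for some other reason).
fn create_unique_dir(tmpdir: &Path, suffix: &str) -> Option<Path> {
    for _ in range(0u, 1000) {
        let name = format!("rs-{}-{}-{}",
                           unsafe { libc::getpid() },
                           CNT.fetch_add(1, atomics::SeqCst),
                           suffix);
        let p = tmpdir.join(name);
        match fs::mkdir(&p, io::UserRWX) {
            Ok(()) => return Some(p), // mkdir succeeded, so the name was free
            Err(..) => {}             // likely "already exists": try again
        }
    }
    None
}
```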
diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs
index 26e854d9d999f..90488eee4341e 100644
--- a/src/libstd/io/test.rs
+++ b/src/libstd/io/test.rs
@@ -54,6 +54,13 @@ macro_rules! iotest (
 )
 
 /// Get a port number, starting at 9600, for use in tests
+#[cfg(not(stage0))]
+pub fn next_test_port() -> u16 {
+    static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
+    base_port() + next_offset.fetch_add(1, Relaxed) as u16
+}
+/// dox
+#[cfg(stage0)]
 pub fn next_test_port() -> u16 {
     static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
     unsafe {
@@ -62,6 +69,24 @@ pub fn next_test_port() -> u16 {
 }
 
 /// Get a temporary path which could be the location of a unix socket
+#[cfg(not(stage0))]
+pub fn next_test_unix() -> Path {
+    static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
+    // base port and pid are an attempt to be unique between multiple
+    // test-runners of different configurations running on one
+    // buildbot; the count is to be unique within this executable.
+    let string = format!("rust-test-unix-path-{}-{}-{}",
+                         base_port(),
+                         unsafe {libc::getpid()},
+                         COUNT.fetch_add(1, Relaxed));
+    if cfg!(unix) {
+        os::tmpdir().join(string)
+    } else {
+        Path::new(format!("{}{}", r"\\.\pipe\", string))
+    }
+}
+/// dox
+#[cfg(stage0)]
 pub fn next_test_unix() -> Path {
     static mut COUNT: AtomicUint = INIT_ATOMIC_UINT;
     // base port and pid are an attempt to be unique between multiple
diff --git a/src/libstd/io/util.rs b/src/libstd/io/util.rs
index e928323030c4f..7c30bb295257c 100644
--- a/src/libstd/io/util.rs
+++ b/src/libstd/io/util.rs
@@ -341,11 +341,11 @@ mod test {
         let mut multi = MultiWriter::new(vec!(box TestWriter as Box<Writer>,
                                               box TestWriter as Box<Writer>));
         multi.write([1, 2, 3]).unwrap();
-        assert_eq!(2, unsafe { writes });
-        assert_eq!(0, unsafe { flushes });
+        assert_eq!(2, writes);
+        assert_eq!(0, flushes);
         multi.flush().unwrap();
-        assert_eq!(2, unsafe { writes });
-        assert_eq!(2, unsafe { flushes });
+        assert_eq!(2, writes);
+        assert_eq!(2, flushes);
     }
 
     #[test]
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 7fff510a60a4b..2906a3b9af44d 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -1082,12 +1082,22 @@ static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
  *
  * Note that this is not synchronized against modifications of other threads.
  */
+#[cfg(not(stage0))]
+pub fn set_exit_status(code: int) {
+    EXIT_STATUS.store(code, SeqCst)
+}
+#[cfg(stage0)]
 pub fn set_exit_status(code: int) {
     unsafe { EXIT_STATUS.store(code, SeqCst) }
 }
 
 /// Fetches the process's current exit code. This defaults to 0 and can change
 /// by calling `set_exit_status`.
+#[cfg(not(stage0))]
+pub fn get_exit_status() -> int {
+    EXIT_STATUS.load(SeqCst)
+}
+#[cfg(stage0)]
 pub fn get_exit_status() -> int {
     unsafe { EXIT_STATUS.load(SeqCst) }
 }
diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs
index 80493ebb4a936..15e46ac246f91 100644
--- a/src/libstd/rt/backtrace.rs
+++ b/src/libstd/rt/backtrace.rs
@@ -27,6 +27,23 @@ pub use self::imp::write;
 
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
+#[cfg(not(stage0))]
+pub fn log_enabled() -> bool {
+    static mut ENABLED: atomics::AtomicInt = atomics::INIT_ATOMIC_INT;
+    match ENABLED.load(atomics::SeqCst) {
+        1 => return false,
+        2 => return true,
+        _ => {}
+    }
+
+    let val = match os::getenv("RUST_BACKTRACE") {
+        Some(..) => 2,
+        None => 1,
+    };
+    ENABLED.store(val, atomics::SeqCst);
+    val == 2
+}
+#[cfg(stage0)]
 pub fn log_enabled() -> bool {
     static mut ENABLED: atomics::AtomicInt = atomics::INIT_ATOMIC_INT;
     unsafe {
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index fa30ddbcc48b5..0fc6e94c252ff 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -40,6 +40,21 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
     (cfg!(target_os="macos")) && running_on_valgrind()
 }
 
+#[cfg(not(stage0))]
+pub fn min_stack() -> uint {
+    static mut MIN: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+    match MIN.load(atomics::SeqCst) {
+        0 => {}
+        n => return n - 1,
+    }
+    let amt = os::getenv("RUST_MIN_STACK").and_then(|s| from_str(s.as_slice()));
+    let amt = amt.unwrap_or(2 * 1024 * 1024);
+    // 0 is our sentinel value, so ensure that we'll never see 0 after
+    // initialization has run
+    MIN.store(amt + 1, atomics::SeqCst);
+    return amt;
+}
+#[cfg(stage0)]
 pub fn min_stack() -> uint {
     static mut MIN: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
     match unsafe { MIN.load(atomics::SeqCst) } {
diff --git a/src/libsync/atomics.rs b/src/libsync/atomics.rs
index 0be124ad58408..255669390bad2 100644
--- a/src/libsync/atomics.rs
+++ b/src/libsync/atomics.rs
@@ -95,10 +95,8 @@
 //!
 //! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
 //!
-//! unsafe {
-//!     let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
-//!     println!("live tasks: {}", old_task_count + 1);
-//! }
+//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
+//! println!("live tasks: {}", old_task_count + 1);
 //! ```
 
 use core::prelude::*;
diff --git a/src/libsync/mutex.rs b/src/libsync/mutex.rs
index 1aa84e8f8d149..fccd614acc711 100644
--- a/src/libsync/mutex.rs
+++ b/src/libsync/mutex.rs
@@ -129,7 +129,7 @@ enum Flavor {
 ///
 /// static mut LOCK: StaticMutex = MUTEX_INIT;
 ///
-/// unsafe {
+/// {
 ///     let _g = LOCK.lock();
 ///     // do some productive work
 /// }
diff --git a/src/libsync/one.rs b/src/libsync/one.rs
index 6fad2c8aa404d..d4dc5b64c99bf 100644
--- a/src/libsync/one.rs
+++ b/src/libsync/one.rs
@@ -32,11 +32,9 @@ use mutex::{StaticMutex, MUTEX_INIT};
 ///
 /// static mut START: Once = ONCE_INIT;
 ///
-/// unsafe {
-///     START.doit(|| {
-///         // run initialization here
-///     });
-/// }
+/// START.doit(|| {
+///     // run initialization here
+/// });
 /// ```
 pub struct Once {
     mutex: StaticMutex,
diff --git a/src/test/compile-fail/static-mut-foreign-requires-unsafe.rs b/src/test/compile-fail/static-mut-foreign-requires-unsafe.rs
index 0e44af19a7f62..2ce096d6a1509 100644
--- a/src/test/compile-fail/static-mut-foreign-requires-unsafe.rs
+++ b/src/test/compile-fail/static-mut-foreign-requires-unsafe.rs
@@ -17,5 +17,4 @@ extern {
 fn main() {
     a += 3;     //~ ERROR: requires unsafe
     a = 4;      //~ ERROR: requires unsafe
-    let _b = a; //~ ERROR: requires unsafe
 }
diff --git a/src/test/compile-fail/static-mut-needs-unsafe.rs b/src/test/compile-fail/static-mut-needs-unsafe.rs
new file mode 100644
index 0000000000000..72d4b9ffba059
--- /dev/null
+++ b/src/test/compile-fail/static-mut-needs-unsafe.rs
@@ -0,0 +1,66 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::kinds::marker;
+
+struct NonSharable {
+    field: uint,
+    noshare: marker::NoShare
+}
+
+struct Sharable {
+    field: uint
+}
+
+impl Sharable {
+    fn foo(&self) {}
+    fn foo_mut(&mut self) {}
+}
+
+static mut NON_SHARABLE: NonSharable = NonSharable {
+    field: 1,
+    noshare: marker::NoShare,
+};
+
+static mut SHARABLE: Sharable = Sharable { field: 0 };
+
+pub fn fn_mut(_: &mut Sharable) {}
+
+pub fn main() {
+    SHARABLE.foo();
+
+    SHARABLE.foo_mut();
+    //~^ ERROR: mutable use of static requires unsafe function or block
+
+    SHARABLE.field = 2;
+    //~^ ERROR: mutable use of static requires unsafe function or block
+
+    fn_mut(&mut SHARABLE);
+    //~^ ERROR mutable use of static requires unsafe function or block
+
+    NON_SHARABLE.field = 2;
+    //~^ ERROR: use of non-Share static mut requires unsafe function or block
+    //~^^ ERROR: mutable use of static requires unsafe function or block
+
+    SHARABLE = Sharable {field: 1};
+    //~^ ERROR: mutable use of static requires unsafe function or block
+
+    let _: &mut Sharable = &mut SHARABLE;
+    //~^ ERROR mutable use of static requires unsafe function or block
+
+    let _ = &NON_SHARABLE.field;
+    //~^ ERROR: use of non-Share static mut requires unsafe function or block
+
+    let mut slc = ['a', 'c'];
+    slc[NON_SHARABLE.field] = 'b';
+    //~^ ERROR: use of non-Share static mut requires unsafe function or block
+
+    slc[SHARABLE.field] = 'b';
+}
diff --git a/src/test/compile-fail/static-mut-requires-unsafe.rs b/src/test/compile-fail/static-mut-requires-unsafe.rs
index 7337920cce68c..cd3c67a8930da 100644
--- a/src/test/compile-fail/static-mut-requires-unsafe.rs
+++ b/src/test/compile-fail/static-mut-requires-unsafe.rs
@@ -13,5 +13,4 @@ static mut a: int = 3;
 fn main() {
     a += 3;         //~ ERROR: requires unsafe
     a = 4;          //~ ERROR: requires unsafe
-    let _b = a;     //~ ERROR: requires unsafe
 }
diff --git a/src/test/run-pass/static-mut-no-need-unsafe.rs b/src/test/run-pass/static-mut-no-need-unsafe.rs
new file mode 100644
index 0000000000000..21c46186c31aa
--- /dev/null
+++ b/src/test/run-pass/static-mut-no-need-unsafe.rs
@@ -0,0 +1,39 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Sharable {
+    field: uint
+}
+
+impl Sharable {
+    fn foo(&self) {}
+    fn foo_mut(&mut self) {}
+}
+
+static mut FOO: Sharable = Sharable { field: 1 };
+
+fn borrow_static(_: &Sharable) {}
+
+pub fn main() {
+
+    FOO.foo();
+
+    borrow_static(&FOO);
+
+    let _ = &FOO;
+
+    unsafe { let _: &mut Sharable = &mut FOO; }
+
+    let mut slc = ['a', 'c'];
+    slc[FOO.field] = 'b';
+
+    let _ =  &((((FOO))));
+}
+