diff --git a/src/commands.rs b/src/commands.rs
index 1fd9a6764..31e1b6801 100644
--- a/src/commands.rs
+++ b/src/commands.rs
@@ -562,7 +562,11 @@ fn handle_compile_response<T>(mut creator: T,
         // get a response then we just forge ahead locally and run the
         // compilation ourselves.
         Err(ProtobufError::IoError(ref e))
-            if e.kind() == io::ErrorKind::UnexpectedEof => {}
+            if e.kind() == io::ErrorKind::UnexpectedEof => {
+                writeln!(io::stderr(),
+                         "warning: sccache server looks like it shut down \
+                          unexpectedly, compiling locally instead").unwrap();
+            }
         Err(e) => return Err(e).chain_err(|| {
             //TODO: something better here?
diff --git a/src/compiler/compiler.rs b/src/compiler/compiler.rs
index 4332a2731..f622f4f84 100644
--- a/src/compiler/compiler.rs
+++ b/src/compiler/compiler.rs
@@ -49,6 +49,7 @@ use std::time::{
 };
 use tempdir::TempDir;
 use util::fmt_duration_as_secs;
+use tokio_core::reactor::{Handle, Timeout};
 
 use errors::*;
@@ -99,7 +100,8 @@ pub trait CompilerHasher<T>: fmt::Debug + Send + 'static
                              arguments: Vec<String>,
                              cwd: String,
                              cache_control: CacheControl,
-                             pool: CpuPool)
+                             pool: CpuPool,
+                             handle: Handle)
                              -> SFuture<(CompileResult, process::Output)>
     {
         let out_file = self.output_file().into_owned();
@@ -126,6 +128,20 @@ pub trait CompilerHasher<T>: fmt::Debug + Send + 'static
             storage.get(&key)
         };
 
+        // Set a maximum time limit for the cache to respond before we forge
+        // ahead ourselves with a compilation.
+        let timeout = Duration::new(60, 0);
+        let timeout = Timeout::new(timeout, &handle).into_future().flatten();
+
+        let cache_status = cache_status.map(Some);
+        let timeout = timeout.map(|_| None).chain_err(|| "timeout error");
+        let cache_status = cache_status.select(timeout).then(|r| {
+            match r {
+                Ok((e, _other)) => Ok(e),
+                Err((e, _other)) => Err(e),
+            }
+        });
+
         // Check the result of the cache lookup.
         Box::new(cache_status.and_then(move |result| {
             let duration = start.elapsed();
@@ -135,7 +151,7 @@ pub trait CompilerHasher<T>: fmt::Debug + Send + 'static
                 .collect::<HashMap<_, _>>();
 
             let miss_type = match result {
-                Some(Cache::Hit(mut entry)) => {
+                Some(Cache::Hit(mut entry)) => {
                     debug!("[{}]: Cache hit in {}", out_file, fmt_duration_as_secs(&duration));
                     let mut stdout = io::Cursor::new(vec!());
                     let mut stderr = io::Cursor::new(vec!());
@@ -161,14 +177,18 @@ pub trait CompilerHasher<T>: fmt::Debug + Send + 'static
                         (result, output)
                     })) as SFuture<_>
                 }
-                Cache::Miss => {
+                Some(Cache::Miss) => {
                     debug!("[{}]: Cache miss", out_file);
                     MissType::Normal
                 }
-                Cache::Recache => {
+                Some(Cache::Recache) => {
                     debug!("[{}]: Cache recache", out_file);
                     MissType::ForcedRecache
                 }
+                None => {
+                    debug!("[{}]: Cache timed out", out_file);
+                    MissType::TimedOut
+                }
             };
 
             // Cache miss, so compile it.
@@ -296,6 +316,8 @@ pub enum MissType {
     Normal,
     /// Cache lookup was overridden, recompilation was forced.
     ForcedRecache,
+    /// Cache took too long to respond.
+    TimedOut,
 }
 
 /// Information about a successful cache write.
@@ -619,6 +641,7 @@ mod test {
     use std::time::Duration;
     use std::usize;
     use test::utils::*;
+    use tokio_core::reactor::Core;
 
     #[test]
     fn test_detect_compiler_kind_gcc() {
@@ -717,6 +740,8 @@ mod test {
         let creator = new_creator();
         let f = TestFixture::new();
         let pool = CpuPool::new(1);
+        let core = Core::new().unwrap();
+        let handle = core.handle();
         let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                      usize::MAX,
                                      &pool);
@@ -753,7 +778,8 @@ mod test {
                                                          arguments.clone(),
                                                          cwd.clone(),
                                                          CacheControl::Default,
-                                                         pool.clone()).wait().unwrap();
+                                                         pool.clone(),
+                                                         handle.clone()).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         match cached {
@@ -776,7 +802,8 @@ mod test {
                                                          arguments,
                                                          cwd,
                                                          CacheControl::Default,
-                                                         pool.clone()).wait().unwrap();
+                                                         pool.clone(),
+                                                         handle).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached);
@@ -792,6 +819,8 @@ mod test {
         let creator = new_creator();
         let f = TestFixture::new();
         let pool = CpuPool::new(1);
+        let core = Core::new().unwrap();
+        let handle = core.handle();
         let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                      usize::MAX,
                                      &pool);
@@ -828,7 +857,8 @@ mod test {
                                                          arguments.clone(),
                                                          cwd.clone(),
                                                          CacheControl::Default,
-                                                         pool.clone()).wait().unwrap();
+                                                         pool.clone(),
+                                                         handle.clone()).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         match cached {
@@ -852,7 +882,8 @@ mod test {
                                                          arguments,
                                                          cwd,
                                                          CacheControl::Default,
-                                                         pool).wait().unwrap();
+                                                         pool,
+                                                         handle).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         assert_eq!(CompileResult::CacheHit(Duration::new(0, 0)), cached);
@@ -868,6 +899,8 @@ mod test {
         let creator = new_creator();
         let f = TestFixture::new();
         let pool = CpuPool::new(1);
+        let core = Core::new().unwrap();
+        let handle = core.handle();
         let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                      usize::MAX,
                                      &pool);
@@ -908,7 +941,8 @@ mod test {
                                                          arguments.clone(),
                                                          cwd.clone(),
                                                          CacheControl::Default,
-                                                         pool.clone()).wait().unwrap();
+                                                         pool.clone(),
+                                                         handle.clone()).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         match cached {
@@ -928,7 +962,8 @@ mod test {
                                                          arguments,
                                                          cwd,
                                                          CacheControl::ForceRecache,
-                                                         pool).wait().unwrap();
+                                                         pool,
+                                                         handle).wait().unwrap();
         // Ensure that the object file was created.
         assert_eq!(true, fs::metadata(&obj).and_then(|m| Ok(m.len() > 0)).unwrap());
         match cached {
@@ -950,6 +985,8 @@ mod test {
         let creator = new_creator();
         let f = TestFixture::new();
         let pool = CpuPool::new(1);
+        let core = Core::new().unwrap();
+        let handle = core.handle();
         let storage = DiskCache::new(&f.tempdir.path().join("cache"),
                                      usize::MAX,
                                      &pool);
@@ -973,7 +1010,8 @@ mod test {
                                                          arguments,
                                                          cwd,
                                                          CacheControl::Default,
-                                                         pool).wait().unwrap();
+                                                         pool,
+                                                         handle).wait().unwrap();
         assert_eq!(cached, CompileResult::Error);
         assert_eq!(exit_status(1), res.status);
         // Shouldn't get anything on stdout, since that would just be preprocessor spew!
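
The heart of the compiler.rs change above is a race: the cache lookup future is mapped to Some(..), a tokio_core Timeout is mapped to None, and futures 0.1's select takes whichever resolves first, so a slow storage backend degrades into an ordinary local compile rather than a hang. The following standalone sketch is not part of the patch; it just illustrates that combinator pattern using the same tokio-core/futures-0.1 era APIs the diff imports. The cache_lookup future and the printed strings are stand-ins, and it uses a plain io::Error where sccache routes errors through its own error-chain type.

extern crate futures;
extern crate tokio_core;

use std::io;
use std::time::Duration;

use futures::{Future, IntoFuture};
use futures::future;
use tokio_core::reactor::{Core, Timeout};

fn main() {
    let mut core = Core::new().unwrap();
    let handle = core.handle();

    // Stand-in for the storage lookup; in sccache this is `storage.get(&key)`.
    let cache_lookup = future::ok::<&'static str, io::Error>("cache entry");

    // A timed-out lookup yields `None`, a completed one yields `Some(entry)`.
    let timeout = Timeout::new(Duration::from_secs(60), &handle)
        .into_future()   // io::Result<Timeout> -> future
        .flatten()       // -> Future<Item = (), Error = io::Error>
        .map(|()| None);

    let raced = cache_lookup.map(Some).select(timeout).then(|res| match res {
        // `select` hands back the winner plus the still-pending loser; keep the winner.
        Ok((winner, _other)) => Ok(winner),
        Err((err, _other)) => Err(err),
    });

    match core.run(raced) {
        Ok(Some(entry)) => println!("cache answered in time: {}", entry),
        Ok(None) => println!("cache lookup timed out, compiling locally"),
        Err(e) => println!("cache error: {}", e),
    }
}

Because the lookup future here resolves immediately, the race prints the "answered in time" branch; swapping in a future that never resolves (e.g. futures::future::empty) exercises the None branch, which is exactly the case the new MissType::TimedOut variant represents.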
diff --git a/src/server.rs b/src/server.rs
index 2e396b71a..c12f2292a 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -557,7 +557,8 @@ impl<C> SccacheService<C>
                                                   arguments,
                                                   cwd,
                                                   cache_control,
-                                                  self.pool.clone());
+                                                  self.pool.clone(),
+                                                  self.handle.clone());
         let me = self.clone();
         let task = result.then(move |result| {
             let mut res = ServerResponse::new();
@@ -583,6 +584,10 @@ impl<C> SccacheService<C>
                                     stats.cache_misses += 1;
                                     stats.forced_recaches += 1;
                                 }
+                                MissType::TimedOut => {
+                                    stats.cache_misses += 1;
+                                    stats.cache_timeouts += 1;
+                                }
                             }
                             stats.cache_read_miss_duration += duration;
                             cache_write = Some(future);
@@ -663,6 +668,8 @@ struct ServerStats {
     pub cache_hits: u64,
     /// The count of cache misses for handled compile requests.
     pub cache_misses: u64,
+    /// The count of cache misses because the cache took too long to respond.
+    pub cache_timeouts: u64,
     /// The count of compilations which were successful but couldn't be cached.
     pub non_cacheable_compilations: u64,
     /// The count of compilations which forcibly ignored the cache.
@@ -692,6 +699,7 @@ impl Default for ServerStats {
             cache_errors: u64::default(),
             cache_hits: u64::default(),
             cache_misses: u64::default(),
+            cache_timeouts: u64::default(),
             non_cacheable_compilations: u64::default(),
             forced_recaches: u64::default(),
             cache_write_errors: u64::default(),
@@ -734,6 +742,7 @@ impl ServerStats {
         set_stat!(stats_vec, self.requests_executed, "Compile requests executed");
         set_stat!(stats_vec, self.cache_hits, "Cache hits");
         set_stat!(stats_vec, self.cache_misses, "Cache misses");
+        set_stat!(stats_vec, self.cache_timeouts, "Cache timeouts");
         set_stat!(stats_vec, self.forced_recaches, "Forced recaches");
         set_stat!(stats_vec, self.cache_write_errors, "Cache write errors");
         set_stat!(stats_vec, self.compile_fails, "Compilation failures");
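
One design point worth noting in the server.rs hunks: a timed-out lookup is counted both as a cache miss and as a cache timeout, so the new "Cache timeouts" line in the stats output is always a subset of "Cache misses". The minimal sketch below mirrors only that accounting; Stats and MissType here are simplified stand-ins for sccache's ServerStats and MissType, not the real types.

// Simplified stand-ins: only the counter relationships match the patch.
#[derive(Default, Debug)]
struct Stats {
    cache_misses: u64,
    cache_timeouts: u64,
    forced_recaches: u64,
}

enum MissType {
    Normal,
    ForcedRecache,
    TimedOut,
}

fn record_miss(stats: &mut Stats, miss: MissType) {
    match miss {
        // Every variant is a miss; the extra counters only refine the reason.
        MissType::Normal => stats.cache_misses += 1,
        MissType::ForcedRecache => {
            stats.cache_misses += 1;
            stats.forced_recaches += 1;
        }
        MissType::TimedOut => {
            stats.cache_misses += 1;
            stats.cache_timeouts += 1;
        }
    }
}

fn main() {
    let mut stats = Stats::default();
    record_miss(&mut stats, MissType::Normal);
    record_miss(&mut stats, MissType::ForcedRecache);
    record_miss(&mut stats, MissType::TimedOut);
    // Timeouts are folded into the miss count, so misses >= timeouts always holds.
    assert_eq!(stats.cache_misses, 3);
    assert_eq!(stats.cache_timeouts, 1);
    assert_eq!(stats.forced_recaches, 1);
    println!("{:?}", stats);
}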