diff --git a/src/use_file.rs b/src/use_file.rs
index d3adaf2a..6e50955c 100644
--- a/src/use_file.rs
+++ b/src/use_file.rs
@@ -7,8 +7,11 @@
 // except according to those terms.
 
 //! Implementations that just need to read from a file
-use crate::util_libc::{last_os_error, open_readonly, sys_fill_exact, LazyFd};
+use crate::util::LazyUsize;
+use crate::util_libc::{open_readonly, sys_fill_exact};
 use crate::Error;
+use core::cell::UnsafeCell;
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 
 #[cfg(target_os = "redox")]
 const FILE_PATH: &str = "rand:\0";
@@ -21,10 +24,11 @@ const FILE_PATH: &str = "rand:\0";
     target_os = "illumos"
 ))]
 const FILE_PATH: &str = "/dev/random\0";
+#[cfg(any(target_os = "android", target_os = "linux"))]
+const FILE_PATH: &str = "/dev/urandom\0";
 
 pub fn getrandom_inner(dest: &mut [u8]) -> Result<(), Error> {
-    static FD: LazyFd = LazyFd::new();
-    let fd = FD.init(init_file).ok_or_else(last_os_error)?;
+    let fd = get_rng_fd()?;
     let read = |buf: &mut [u8]| unsafe { libc::read(fd, buf.as_mut_ptr() as *mut _, buf.len()) };
 
     if cfg!(target_os = "emscripten") {
@@ -38,36 +42,96 @@ pub fn getrandom_inner(dest: &mut [u8]) -> Result<(), Error> {
     Ok(())
 }
 
-cfg_if! {
-    if #[cfg(any(target_os = "android", target_os = "linux"))] {
-        fn init_file() -> Option<libc::c_int> {
-            // Poll /dev/random to make sure it is ok to read from /dev/urandom.
-            let mut pfd = libc::pollfd {
-                fd: unsafe { open_readonly("/dev/random\0")? },
-                events: libc::POLLIN,
-                revents: 0,
-            };
-
-            let ret = loop {
-                // A negative timeout means an infinite timeout.
-                let res = unsafe { libc::poll(&mut pfd, 1, -1) };
-                if res == 1 {
-                    break unsafe { open_readonly("/dev/urandom\0") };
-                } else if res < 0 {
-                    let e = last_os_error().raw_os_error();
-                    if e == Some(libc::EINTR) || e == Some(libc::EAGAIN) {
-                        continue;
-                    }
-                }
-                // We either hard failed, or poll() returned the wrong pfd.
-                break None;
-            };
-            unsafe { libc::close(pfd.fd) };
-            ret
+// Returns the file descriptor for the device file used to retrieve random
+// bytes. The file will be opened exactly once. All successful calls will
+// return the same file descriptor. This file descriptor is never closed.
+fn get_rng_fd() -> Result<libc::c_int, Error> {
+    static FD: AtomicUsize = AtomicUsize::new(LazyUsize::UNINIT);
+    fn get_fd() -> Option<libc::c_int> {
+        match FD.load(Relaxed) {
+            LazyUsize::UNINIT => None,
+            val => Some(val as libc::c_int),
         }
-    } else {
-        fn init_file() -> Option<libc::c_int> {
-            unsafe { open_readonly(FILE_PATH) }
+    }
+
+    // Use double-checked locking to avoid acquiring the lock if possible.
+    if let Some(fd) = get_fd() {
+        return Ok(fd);
+    }
+
+    // SAFETY: We use the mutex only in this method, and we always unlock it
+    // before returning, making sure we don't violate the pthread_mutex_t API.
+    static MUTEX: Mutex = Mutex::new();
+    unsafe { MUTEX.lock() };
+    let _guard = DropGuard(|| unsafe { MUTEX.unlock() });
+
+    if let Some(fd) = get_fd() {
+        return Ok(fd);
+    }
+
+    // On Linux, /dev/urandom might return insecure values.
+    #[cfg(any(target_os = "android", target_os = "linux"))]
+    wait_until_rng_ready()?;
+
+    let fd = unsafe { open_readonly(FILE_PATH)? };
+    // The fd always fits in a usize without conflicting with UNINIT.
+    debug_assert!(fd >= 0 && (fd as usize) < LazyUsize::UNINIT);
+    FD.store(fd as usize, Relaxed);
+
+    Ok(fd)
+}
+
+// Succeeds once /dev/urandom is safe to read from
+#[cfg(any(target_os = "android", target_os = "linux"))]
+fn wait_until_rng_ready() -> Result<(), Error> {
+    // Poll /dev/random to make sure it is ok to read from /dev/urandom.
+    let fd = unsafe { open_readonly("/dev/random\0")? };
+    let mut pfd = libc::pollfd {
+        fd,
+        events: libc::POLLIN,
+        revents: 0,
+    };
+    let _guard = DropGuard(|| unsafe {
+        libc::close(fd);
+    });
+
+    loop {
+        // A negative timeout means an infinite timeout.
+        let res = unsafe { libc::poll(&mut pfd, 1, -1) };
+        if res >= 0 {
+            assert_eq!(res, 1); // We only used one fd, and cannot timeout.
+            return Ok(());
         }
+        let err = crate::util_libc::last_os_error();
+        match err.raw_os_error() {
+            Some(libc::EINTR) | Some(libc::EAGAIN) => continue,
+            _ => return Err(err),
+        }
+    }
+}
+
+struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
+
+impl Mutex {
+    const fn new() -> Self {
+        Self(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))
+    }
+    unsafe fn lock(&self) {
+        let r = libc::pthread_mutex_lock(self.0.get());
+        debug_assert_eq!(r, 0);
+    }
+    unsafe fn unlock(&self) {
+        let r = libc::pthread_mutex_unlock(self.0.get());
+        debug_assert_eq!(r, 0);
+    }
+}
+
+unsafe impl Sync for Mutex {}
+
+struct DropGuard<F: FnMut()>(F);
+
+impl<F: FnMut()> Drop for DropGuard<F> {
+    fn drop(&mut self) {
+        self.0()
     }
 }
diff --git a/src/util.rs b/src/util.rs
index e0e93075..8dbd8ae8 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -35,8 +35,6 @@ impl LazyUsize {
 
     // The initialization is not completed.
     pub const UNINIT: usize = usize::max_value();
-    // The initialization is currently running.
-    pub const ACTIVE: usize = usize::max_value() - 1;
 
     // Runs the init() function at least once, returning the value of some run
     // of init(). Multiple callers can run their init() functions in parallel.
@@ -50,36 +48,6 @@ impl LazyUsize {
         }
         val
     }
-
-    // Synchronously runs the init() function. Only one caller will have their
-    // init() function running at a time, and exactly one successful call will
-    // be run. init() returning UNINIT or ACTIVE will be considered a failure,
-    // and future calls to sync_init will rerun their init() function.
-    pub fn sync_init(&self, init: impl FnOnce() -> usize, mut wait: impl FnMut()) -> usize {
-        // Common and fast path with no contention. Don't wast time on CAS.
-        match self.0.load(Relaxed) {
-            Self::UNINIT | Self::ACTIVE => {}
-            val => return val,
-        }
-        // Relaxed ordering is fine, as we only have a single atomic variable.
-        loop {
-            match self.0.compare_and_swap(Self::UNINIT, Self::ACTIVE, Relaxed) {
-                Self::UNINIT => {
-                    let val = init();
-                    self.0.store(
-                        match val {
-                            Self::UNINIT | Self::ACTIVE => Self::UNINIT,
-                            val => val,
-                        },
-                        Relaxed,
-                    );
-                    return val;
-                }
-                Self::ACTIVE => wait(),
-                val => return val,
-            }
-        }
-    }
 }
 
 // Identical to LazyUsize except with bool instead of usize.
diff --git a/src/util_libc.rs b/src/util_libc.rs
index 5a051701..fbf75883 100644
--- a/src/util_libc.rs
+++ b/src/util_libc.rs
@@ -89,37 +89,6 @@ impl Weak {
     }
 }
 
-pub struct LazyFd(LazyUsize);
-
-impl LazyFd {
-    pub const fn new() -> Self {
-        Self(LazyUsize::new())
-    }
-
-    // If init() returns Some(x), x should be nonnegative.
-    pub fn init(&self, init: impl FnOnce() -> Option<libc::c_int>) -> Option<libc::c_int> {
-        let fd = self.0.sync_init(
-            || match init() {
-                // OK as val >= 0 and val <= c_int::MAX < usize::MAX
-                Some(val) => val as usize,
-                None => LazyUsize::UNINIT,
-            },
-            || unsafe {
-                // We are usually waiting on an open(2) syscall to complete,
-                // which typically takes < 10us if the file is a device.
-                // However, we might end up waiting much longer if the entropy
-                // pool isn't initialized, but even in that case, this loop will
-                // consume a negligible amount of CPU on most platforms.
-                libc::usleep(10);
-            },
-        );
-        match fd {
-            LazyUsize::UNINIT => None,
-            val => Some(val as libc::c_int),
-        }
-    }
-}
-
 cfg_if! {
     if #[cfg(any(target_os = "linux", target_os = "emscripten"))] {
         use libc::open64 as open;
@@ -129,15 +98,15 @@ cfg_if! {
 }
 
 // SAFETY: path must be null terminated, FD must be manually closed.
-pub unsafe fn open_readonly(path: &str) -> Option<libc::c_int> {
+pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
     debug_assert!(path.as_bytes().last() == Some(&0));
-    let fd = open(path.as_ptr() as *mut _, libc::O_RDONLY | libc::O_CLOEXEC);
+    let fd = open(path.as_ptr() as *const _, libc::O_RDONLY | libc::O_CLOEXEC);
     if fd < 0 {
-        return None;
+        return Err(last_os_error());
     }
     // O_CLOEXEC works on all Unix targets except for older Linux kernels (pre
    // 2.6.23), so we also use an ioctl to make sure FD_CLOEXEC is set.
     #[cfg(target_os = "linux")]
     libc::ioctl(fd, libc::FIOCLEX);
-    Some(fd)
+    Ok(fd)
 }
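
Reviewer note (not part of the patch): the sketch below is a minimal, standalone illustration of the double-checked locking pattern that get_rng_fd() introduces above. The names cached_fd and open_device are hypothetical stand-ins, and std::sync::Mutex stands in for the patch's hand-rolled pthread-based Mutex, which the crate appears to need because it only relies on core, not std.

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::Mutex;

// Sentinel meaning "not yet initialized", playing the role of LazyUsize::UNINIT.
const UNINIT: usize = usize::MAX;

// Hypothetical stand-in for open_readonly(FILE_PATH).
fn open_device() -> usize {
    3 // pretend the OS handed us file descriptor 3
}

fn cached_fd() -> usize {
    static FD: AtomicUsize = AtomicUsize::new(UNINIT);
    static LOCK: Mutex<()> = Mutex::new(());

    // Fast path: some earlier call already stored the descriptor.
    match FD.load(Relaxed) {
        UNINIT => {}
        fd => return fd,
    }
    // Slow path: serialize initializers, then re-check under the lock so the
    // device is opened at most once even if several threads race to get here.
    let _guard = LOCK.lock().unwrap();
    match FD.load(Relaxed) {
        UNINIT => {
            let fd = open_device();
            FD.store(fd, Relaxed);
            fd
        }
        fd => fd,
    }
}

fn main() {
    assert_eq!(cached_fd(), cached_fd()); // every call sees the same fd
}

The patch applies the same shape with a raw pthread_mutex_t plus a DropGuard, so the lock is released even on the early returns inside get_rng_fd().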