Skip to content

Commit

Permalink
use_file: Remove use of spin-locks
Browse files Browse the repository at this point in the history
Remove the general purpose spin-lock from getrandom, and don't spin
when polling /dev/random. We can just use the poll() call itself to
have threads wait for /dev/urandom to be safe.

We also remove the use of spin locks when opening the persistent fd for
platforms that require it. We take an approach similar to unsync_init,
but in the very rare case that we open the file twice, we close the
unnecessary fd.

Signed-off-by: Joe Richey <joerichey@google.com>
  • Loading branch information
josephlr committed Jan 2, 2020
1 parent d661aa7 commit ce72175
Show file tree
Hide file tree
Showing 3 changed files with 86 additions and 94 deletions.
117 changes: 86 additions & 31 deletions src/use_file.rs
Expand Up @@ -7,8 +7,10 @@
// except according to those terms.

//! Implementations that just need to read from a file
use crate::util_libc::{last_os_error, open_readonly, sys_fill_exact, LazyFd};
use crate::util::LazyUsize;
use crate::util_libc::{last_os_error, open_readonly, sys_fill_exact};
use crate::Error;
use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};

#[cfg(target_os = "redox")]
const FILE_PATH: &str = "rand:\0";
Expand All @@ -21,10 +23,21 @@ const FILE_PATH: &str = "rand:\0";
target_os = "illumos"
))]
const FILE_PATH: &str = "/dev/random\0";
#[cfg(any(target_os = "android", target_os = "linux"))]
const FILE_PATH: &str = "/dev/urandom\0";

pub fn getrandom_inner(dest: &mut [u8]) -> Result<(), Error> {
static FD: LazyFd = LazyFd::new();
let fd = FD.init(init_file).ok_or_else(last_os_error)?;
// On Linux, check that /dev/urandom will not return insecure values.
#[cfg(any(target_os = "android", target_os = "linux"))]
{
static RANDOM_INIT: LazyUsize = LazyUsize::new();
if RANDOM_INIT.unsync_init(random_init) == LazyUsize::UNINIT {
return Err(last_os_error());
}
}

static FD: RngFd = RngFd::new();
let fd = FD.sync_init().ok_or_else(last_os_error)?;
let read = |buf: &mut [u8]| unsafe { libc::read(fd, buf.as_mut_ptr() as *mut _, buf.len()) };

if cfg!(target_os = "emscripten") {
Expand All @@ -38,36 +51,78 @@ pub fn getrandom_inner(dest: &mut [u8]) -> Result<(), Error> {
Ok(())
}

cfg_if! {
if #[cfg(any(target_os = "android", target_os = "linux"))] {
fn init_file() -> Option<libc::c_int> {
// Poll /dev/random to make sure it is ok to read from /dev/urandom.
let mut pfd = libc::pollfd {
fd: unsafe { open_readonly("/dev/random\0")? },
events: libc::POLLIN,
revents: 0,
};
// Handle to the device file used to retrieve random numbers
struct RngFd(AtomicUsize);

impl RngFd {
const fn new() -> Self {
Self(AtomicUsize::new(LazyUsize::UNINIT))
}

let ret = loop {
// A negative timeout means an infinite timeout.
let res = unsafe { libc::poll(&mut pfd, 1, -1) };
if res == 1 {
break unsafe { open_readonly("/dev/urandom\0") };
} else if res < 0 {
let e = last_os_error().raw_os_error();
if e == Some(libc::EINTR) || e == Some(libc::EAGAIN) {
continue;
}
}
// We either hard failed, or poll() returned the wrong pfd.
break None;
};
unsafe { libc::close(pfd.fd) };
ret
// Initializes and returns the file descriptor. Similar to open_readonly(),
// None is returned on failure. On success, this function will always return
// the same file descriptor, even from different threads. Note that this
// does not mean only one file is ever opened, just that one "wins".
fn sync_init(&self) -> Option<libc::c_int> {
// Common and fast path with no contention. Don't waste time on CAS.
match self.0.load(Relaxed) {
LazyUsize::UNINIT => {}
current_val => return Some(current_val as libc::c_int),
}
} else {
fn init_file() -> Option<libc::c_int> {
unsafe { open_readonly(FILE_PATH) }

let fd = unsafe { open_readonly(FILE_PATH)? };
// The fd always fits in a usize without conflicting with UNINIT.
debug_assert!(fd >= 0 && (fd as usize) < LazyUsize::UNINIT);
let new_val = fd as usize;
// Relaxed ordering is fine, as we only have a single atomic variable.
match self.0.compare_and_swap(LazyUsize::UNINIT, new_val, Relaxed) {
// We won the race and initialized the fd, so we are done.
LazyUsize::UNINIT => Some(fd),
// Some other thread beat us to initializing the fd. This is quite
// rare as open(2) does not block when opening the random devices.
// In this case, cleanup the unnecessary fd we opened.
current_val => {
unsafe { libc::close(fd) };
Some(current_val as libc::c_int)
}
}
}
}

// Returns 0 if urandom is safe to read from, LazyUsize::UNINIT otherwise.
#[cfg(any(target_os = "android", target_os = "linux"))]
fn random_init() -> usize {
    // Guard type whose Drop impl closes the wrapped fd, so the polling fd
    // is released on every exit path out of this function.
    struct FdGuard(libc::c_int);
    impl Drop for FdGuard {
        fn drop(&mut self) {
            unsafe { libc::close(self.0) };
        }
    }

    // Poll /dev/random to make sure it is ok to read from /dev/urandom.
    let guard = match unsafe { open_readonly("/dev/random\0") } {
        Some(fd) => FdGuard(fd),
        None => return LazyUsize::UNINIT,
    };
    let mut pfd = libc::pollfd {
        fd: guard.0,
        events: libc::POLLIN,
        revents: 0,
    };

    loop {
        // A negative timeout means an infinite timeout.
        match unsafe { libc::poll(&mut pfd, 1, -1) } {
            // Exactly our one fd became readable: urandom is safe.
            1 => return 0,
            res if res < 0 => {
                let e = last_os_error().raw_os_error();
                // Harmless interruptions: retry the poll.
                if e == Some(libc::EINTR) || e == Some(libc::EAGAIN) {
                    continue;
                }
                // Hard failure from poll().
                return LazyUsize::UNINIT;
            }
            // poll() succeeded but reported the wrong number of ready fds.
            _ => return LazyUsize::UNINIT,
        }
    }
}
32 changes: 0 additions & 32 deletions src/util.rs
Expand Up @@ -35,8 +35,6 @@ impl LazyUsize {

// The initialization is not completed.
pub const UNINIT: usize = usize::max_value();
// The initialization is currently running.
pub const ACTIVE: usize = usize::max_value() - 1;

// Runs the init() function at least once, returning the value of some run
// of init(). Multiple callers can run their init() functions in parallel.
Expand All @@ -50,36 +48,6 @@ impl LazyUsize {
}
val
}

// Synchronously runs the init() function. Only one caller will have their
// init() function running at a time, and exactly one successful call will
// be run. init() returning UNINIT or ACTIVE will be considered a failure,
// and future calls to sync_init will rerun their init() function.
pub fn sync_init(&self, init: impl FnOnce() -> usize, mut wait: impl FnMut()) -> usize {
    // Common and fast path with no contention. Don't waste time on CAS.
    match self.0.load(Relaxed) {
        Self::UNINIT | Self::ACTIVE => {}
        val => return val,
    }
    // Relaxed ordering is fine, as we only have a single atomic variable.
    loop {
        // compare_exchange(old, new, Relaxed, Relaxed) is the exact
        // replacement for the deprecated compare_and_swap(old, new, Relaxed).
        match self.0.compare_exchange(Self::UNINIT, Self::ACTIVE, Relaxed, Relaxed) {
            // We won the race: run init() and publish its result.
            Ok(_) => {
                let val = init();
                // Map the failure sentinels back to UNINIT so a future
                // caller reruns init(); store any real value for the fast path.
                self.0.store(
                    match val {
                        Self::UNINIT | Self::ACTIVE => Self::UNINIT,
                        val => val,
                    },
                    Relaxed,
                );
                return val;
            }
            // Another thread is currently running init(): back off and retry.
            Err(Self::ACTIVE) => wait(),
            // Initialization already completed; return the published value.
            Err(val) => return val,
        }
    }
}
}

// Identical to LazyUsize except with bool instead of usize.
Expand Down
31 changes: 0 additions & 31 deletions src/util_libc.rs
Expand Up @@ -89,37 +89,6 @@ impl Weak {
}
}

// Lazily-initialized file descriptor backed by a LazyUsize.
pub struct LazyFd(LazyUsize);

impl LazyFd {
    pub const fn new() -> Self {
        Self(LazyUsize::new())
    }

    // If init() returns Some(x), x should be nonnegative.
    pub fn init(&self, init: impl FnOnce() -> Option<libc::c_int>) -> Option<libc::c_int> {
        // Encode the Option into a usize for LazyUsize: a valid fd maps to
        // itself (OK as val >= 0 and val <= c_int::MAX < usize::MAX) and
        // None maps to the UNINIT sentinel.
        let encode = || init().map_or(LazyUsize::UNINIT, |val| val as usize);
        // We are usually waiting on an open(2) syscall to complete,
        // which typically takes < 10us if the file is a device.
        // However, we might end up waiting much longer if the entropy
        // pool isn't initialized, but even in that case, this loop will
        // consume a negligible amount of CPU on most platforms.
        let backoff = || unsafe {
            libc::usleep(10);
        };
        match self.0.sync_init(encode, backoff) {
            LazyUsize::UNINIT => None,
            val => Some(val as libc::c_int),
        }
    }
}

cfg_if! {
if #[cfg(any(target_os = "linux", target_os = "emscripten"))] {
use libc::open64 as open;
Expand Down

0 comments on commit ce72175

Please sign in to comment.