From c7684b3159431c62318857c463e6f3ca1085fae7 Mon Sep 17 00:00:00 2001
From: Thibaut Ackermann
Date: Fri, 1 Nov 2019 10:45:59 +0100
Subject: [PATCH] add test_sched.rs with sched_affinity test

---
 test/test.rs       |  1 +
 test/test_sched.rs | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)
 create mode 100644 test/test_sched.rs

diff --git a/test/test.rs b/test/test.rs
index 242605005b..a46f5c2abf 100644
--- a/test/test.rs
+++ b/test/test.rs
@@ -118,6 +118,7 @@ mod test_net;
 mod test_nix_path;
 mod test_poll;
 mod test_pty;
+mod test_sched;
 #[cfg(any(target_os = "android",
           target_os = "freebsd",
           target_os = "ios",
diff --git a/test/test_sched.rs b/test/test_sched.rs
new file mode 100644
index 0000000000..d7c48b6ae7
--- /dev/null
+++ b/test/test_sched.rs
@@ -0,0 +1,36 @@
+
+#[cfg(any(target_os = "android",
+          target_os = "linux"))]
+#[test]
+fn test_sched_affinity() {
+    use nix::sched::{sched_getaffinity, sched_setaffinity, CpuSet};
+    use nix::unistd::Pid;
+    use std::mem;
+
+    // If pid is zero, then the mask of the calling process is returned.
+    let initial_affinity = sched_getaffinity(Pid::from_raw(0)).unwrap();
+    let mut at_least_one_cpu = false;
+    let mut last_valid_cpu = 0;
+    for field in 0..(8 * mem::size_of::<CpuSet>()) {
+        if initial_affinity.is_set(field).unwrap() {
+            at_least_one_cpu = true;
+            last_valid_cpu = field;
+        }
+    }
+    assert!(at_least_one_cpu);
+
+    // Now restrict the running CPU
+    let mut new_affinity = CpuSet::new();
+    new_affinity.set(last_valid_cpu).unwrap();
+    sched_setaffinity(Pid::from_raw(0), &new_affinity).unwrap();
+
+    // And now re-check the affinity, which should be only the one we set.
+    let updated_affinity = sched_getaffinity(Pid::from_raw(0)).unwrap();
+    for field in 0..(8 * mem::size_of::<CpuSet>()) {
+        // Should be set only for the CPU we set previously
+        assert_eq!(updated_affinity.is_set(field).unwrap(), field == last_valid_cpu);
+    }
+
+    // Finally, reset the initial CPU set
+    sched_setaffinity(Pid::from_raw(0), &initial_affinity).unwrap();
+}