aya: Implement RingBuf
This implements the userspace binding for RingBuf.

Instead of streaming samples as heap-allocated buffers, the process_ring
function takes a callback to which we pass each event's byte region,
roughly following [libbpf]'s API design. This avoids a copy and allows
advancing the consumer pointer in a timely manner.

[libbpf]: https://github.com/libbpf/libbpf/blob/master/src/ringbuf.c
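
A rough sketch of how the callback-based flow described above might look
from userspace. This is illustrative only: the map name "EVENTS", the
try_from conversion, and the exact process_ring signature are assumptions,
not the finalized API.

    use aya::{maps::RingBuf, Bpf};

    fn drain_events(bpf: &mut Bpf) -> Result<(), Box<dyn std::error::Error>> {
        // Convert the loaded map into the userspace RingBuf handle; the
        // Map::RingBuf variant added in this commit enables the conversion.
        // (Map name and conversion form are assumptions for this sketch.)
        let mut ring = RingBuf::try_from(bpf.map_mut("EVENTS").ok_or("map not found")?)?;

        // process_ring hands each event's byte region to the callback without
        // an intermediate heap copy, then advances the consumer pointer.
        ring.process_ring(|event: &[u8]| {
            println!("received {} bytes", event.len());
        })?;

        Ok(())
    }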

Co-authored-by: William Findlay <william@williamfindlay.com>
2 people authored and ajwerner committed Jul 1, 2023
1 parent 2423f0d commit 1e3b330
Showing 4 changed files with 364 additions and 27 deletions.
59 changes: 56 additions & 3 deletions aya/src/bpf.rs
@@ -10,6 +10,7 @@ use std::{
use aya_obj::{
btf::{BtfFeatures, BtfRelocationError},
generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS},
maps::InvalidMapTypeError,
relocation::BpfRelocationError,
BpfSectionKind, Features,
};
@@ -369,12 +370,16 @@ impl<'a> BpfLoader<'a> {
continue;
}

let map_type: bpf_map_type =
obj.map_type()
.try_into()
.map_err(|InvalidMapTypeError { map_type }| {
BpfError::MapError(MapError::InvalidMapType { map_type })
})?;
match self.max_entries.get(name.as_str()) {
Some(size) => obj.set_max_entries(*size),
None => {
if obj.map_type() == BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32
&& obj.max_entries() == 0
{
if map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && obj.max_entries() == 0 {
obj.set_max_entries(
possible_cpus()
.map_err(|error| BpfError::FileError {
@@ -386,6 +391,12 @@
}
}
}
if map_type == BPF_MAP_TYPE_RINGBUF {
obj.set_max_entries(adjust_to_page_size(
crate::util::page_size() as u32,
obj.max_entries(),
))
}
let mut map = MapData {
obj,
fd: None,
@@ -627,6 +638,7 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
BPF_MAP_TYPE_PERCPU_HASH => Ok(Map::PerCpuHashMap(map)),
BPF_MAP_TYPE_LRU_PERCPU_HASH => Ok(Map::PerCpuLruHashMap(map)),
BPF_MAP_TYPE_PERF_EVENT_ARRAY => Ok(Map::PerfEventArray(map)),
BPF_MAP_TYPE_RINGBUF => Ok(Map::RingBuf(map)),
BPF_MAP_TYPE_SOCKHASH => Ok(Map::SockHash(map)),
BPF_MAP_TYPE_SOCKMAP => Ok(Map::SockMap(map)),
BPF_MAP_TYPE_BLOOM_FILTER => Ok(Map::BloomFilter(map)),
@@ -642,6 +654,47 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
Ok((name, map))
}

// Adjusts the byte size of a RingBuf map to match a power-of-two multiple of the page size.
//
// This mirrors the logic used by libbpf.
// See https://github.com/libbpf/libbpf/blob/ec6f716eda43fd0f4b865ddcebe0ce8cb56bf445/src/libbpf.c#L2461-L2463
const fn adjust_to_page_size(page_size: u32, byte_size: u32) -> u32 {
if byte_size == 0 {
return 0;
}
let quotient = byte_size / page_size;
let remainder = byte_size % page_size;
if remainder == 0 && quotient.is_power_of_two() {
return byte_size;
}
let mut pages_needed = quotient;
if remainder > 0 {
pages_needed += 1
}
pages_needed.next_power_of_two() * page_size
}

#[cfg(test)]
mod tests {
use super::adjust_to_page_size;

#[test]
fn test_adjust_to_page_size() {
const PAGE_SIZE: u32 = 4096;
[
(0, 0),
(4096, 1),
(4096, 4095),
(4096, 4096),
(8192, 4097),
(8192, 8192),
(16384, 8193),
]
.into_iter()
.for_each(|(exp, input)| assert_eq!(exp, adjust_to_page_size(PAGE_SIZE, input)))
}
}

impl<'a> Default for BpfLoader<'a> {
fn default() -> Self {
BpfLoader::new()
6 changes: 6 additions & 0 deletions aya/src/maps/mod.rs
@@ -68,6 +68,7 @@ pub mod hash_map;
pub mod lpm_trie;
pub mod perf;
pub mod queue;
pub mod ring_buf;
pub mod sock;
pub mod stack;
pub mod stack_trace;
@@ -81,6 +82,7 @@ pub use lpm_trie::LpmTrie;
pub use perf::AsyncPerfEventArray;
pub use perf::PerfEventArray;
pub use queue::Queue;
pub use ring_buf::RingBuf;
pub use sock::{SockHash, SockMap};
pub use stack::Stack;
pub use stack_trace::StackTraceMap;
@@ -247,6 +249,8 @@ pub enum Map {
PerCpuLruHashMap(MapData),
/// A [`PerfEventArray`] map
PerfEventArray(MapData),
/// A [`RingBuf`] map
RingBuf(MapData),
/// A [`SockMap`] map
SockMap(MapData),
/// A [`SockHash`] map
@@ -275,6 +279,7 @@ impl Map {
Map::PerCpuHashMap(map) => map.obj.map_type(),
Map::PerCpuLruHashMap(map) => map.obj.map_type(),
Map::PerfEventArray(map) => map.obj.map_type(),
Map::RingBuf(map) => map.obj.map_type(),
Map::SockHash(map) => map.obj.map_type(),
Map::SockMap(map) => map.obj.map_type(),
Map::BloomFilter(map) => map.obj.map_type(),
@@ -336,6 +341,7 @@ impl_try_from_map!(
SockMap from Map::SockMap,
PerfEventArray from Map::PerfEventArray,
StackTraceMap from Map::StackTraceMap,
RingBuf from Map::RingBuf,
);

#[cfg(feature = "async")]
