walredo: simple tests and bench updates #3045

Merged 6 commits on Jan 16, 2023
124 changes: 80 additions & 44 deletions pageserver/benches/bench_walredo.rs
@@ -30,33 +30,44 @@ fn redo_scenarios(c: &mut Criterion) {
     let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
     let conf = Box::leak(Box::new(conf));
     let tenant_id = TenantId::generate();
-    // std::fs::create_dir_all(conf.tenant_path(&tenant_id)).unwrap();
-    let mut manager = PostgresRedoManager::new(conf, tenant_id);
-    manager.launch_process(14).unwrap();
+
+    let manager = PostgresRedoManager::new(conf, tenant_id);
+
+    let manager = Arc::new(manager);
+
+    tracing::info!("executing first");
+    short().execute(&manager).unwrap();
+    tracing::info!("first executed");
 
     let thread_counts = [1, 2, 4, 8, 16];
 
+    let mut group = c.benchmark_group("short");
+    group.sampling_mode(criterion::SamplingMode::Flat);
+
     for thread_count in thread_counts {
-        c.bench_with_input(
-            BenchmarkId::new("short-50record", thread_count),
+        group.bench_with_input(
+            BenchmarkId::new("short", thread_count),
             &thread_count,
             |b, thread_count| {
-                add_multithreaded_walredo_requesters(b, *thread_count, &manager, short, 50);
+                add_multithreaded_walredo_requesters(b, *thread_count, &manager, short);
             },
         );
     }
+    drop(group);
+
+    let mut group = c.benchmark_group("medium");
+    group.sampling_mode(criterion::SamplingMode::Flat);
 
     for thread_count in thread_counts {
-        c.bench_with_input(
-            BenchmarkId::new("medium-10record", thread_count),
+        group.bench_with_input(
+            BenchmarkId::new("medium", thread_count),
             &thread_count,
             |b, thread_count| {
-                add_multithreaded_walredo_requesters(b, *thread_count, &manager, medium, 10);
+                add_multithreaded_walredo_requesters(b, *thread_count, &manager, medium);
             },
         );
     }
+    drop(group);
 }
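
This hunk moves the benchmarks into named criterion groups (`short`, `medium`) with flat sampling, instead of encoding the repeat count into the benchmark id. For reference, a minimal standalone sketch of that group pattern — the `workload` function and the numbers are illustrative, not the PR's code:

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, SamplingMode};

fn bench_groups(c: &mut Criterion) {
    // One group per payload size; results are reported under the group name.
    let mut group = c.benchmark_group("short");
    // Flat sampling runs the same number of iterations for every sample,
    // which suits slow, high-variance work like process round-trips.
    group.sampling_mode(SamplingMode::Flat);
    for thread_count in [1u32, 2, 4, 8, 16] {
        group.bench_with_input(
            BenchmarkId::new("short", thread_count),
            &thread_count,
            |b, n| b.iter(|| workload(*n)), // stand-in for the walredo requests
        );
    }
    // Ends the group explicitly; dropping it, as above, has the same effect.
    group.finish();
}

fn workload(n: u32) {
    // placeholder payload
    criterion::black_box(n);
}

criterion_group!(benches, bench_groups);
criterion_main!(benches);
```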

/// Sets up `threads` requester threads that call `request_redo` with the given input.
@@ -65,46 +76,66 @@ fn add_multithreaded_walredo_requesters(
     threads: u32,
     manager: &Arc<PostgresRedoManager>,
     input_factory: fn() -> Request,
-    request_repeats: usize,
 ) {
-    b.iter_batched_ref(
-        || {
-            // barrier for all of the threads, and the benchmarked thread
-            let barrier = Arc::new(Barrier::new(threads as usize + 1));
-
-            let jhs = (0..threads)
-                .map(|_| {
-                    std::thread::spawn({
-                        let manager = manager.clone();
-                        let barrier = barrier.clone();
-                        move || {
-                            let input = std::iter::repeat(input_factory())
-                                .take(request_repeats)
-                                .collect::<Vec<_>>();
-
-                            barrier.wait();
-
-                            execute_all(input, &manager).unwrap();
-
-                            barrier.wait();
-                        }
-                    })
-                })
-                .collect::<Vec<_>>();
-
-            (barrier, JoinOnDrop(jhs))
-        },
-        |input| {
-            let barrier = &input.0;
-
-            // start the work
-            barrier.wait();
-
-            // wait for work to complete
-            barrier.wait();
-        },
-        criterion::BatchSize::PerIteration,
-    );
+    assert_ne!(threads, 0);
+
+    if threads == 1 {
+        b.iter_batched_ref(
+            || Some(input_factory()),
+            |input| execute_all(input.take(), manager),
+            criterion::BatchSize::PerIteration,
+        );
+    } else {
+        let (work_tx, work_rx) = std::sync::mpsc::sync_channel(threads as usize);
+
+        let work_rx = std::sync::Arc::new(std::sync::Mutex::new(work_rx));
+
+        let barrier = Arc::new(Barrier::new(threads as usize + 1));
+
+        let jhs = (0..threads)
+            .map(|_| {
+                std::thread::spawn({
+                    let manager = manager.clone();
+                    let barrier = barrier.clone();
+                    let work_rx = work_rx.clone();
+                    move || loop {
+                        // queue up and wait if we want to go another round
+                        if work_rx.lock().unwrap().recv().is_err() {
+                            break;
+                        }
+
+                        let input = Some(input_factory());
+
+                        barrier.wait();
+
+                        execute_all(input, &manager).unwrap();
+
+                        barrier.wait();
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        let _jhs = JoinOnDrop(jhs);
+
+        b.iter_batched(
+            || {
+                for _ in 0..threads {
+                    work_tx.send(()).unwrap()
+                }
+            },
+            |()| {
+                // start the work
+                barrier.wait();
+
+                // wait for work to complete
+                barrier.wait();
+            },
+            criterion::BatchSize::PerIteration,
+        );
+        drop(work_tx);
+    }
 }
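
The multi-threaded path keeps a pool of worker threads alive across iterations: each benchmark iteration queues one unit of work per worker on a bounded channel, and two barrier waits bracket the timed region. A self-contained sketch of the same coordination pattern, outside criterion — `run_rounds` and `work` are hypothetical names, not part of the PR:

```rust
use std::sync::{mpsc, Arc, Barrier, Mutex};

fn run_rounds(threads: usize, rounds: usize) {
    let (work_tx, work_rx) = mpsc::sync_channel::<()>(threads);
    // Workers share a single receiver behind a mutex, as in the benchmark.
    let work_rx = Arc::new(Mutex::new(work_rx));
    // +1 so the coordinating (timed) thread participates in both waits.
    let barrier = Arc::new(Barrier::new(threads + 1));

    let handles: Vec<_> = (0..threads)
        .map(|_| {
            let (work_rx, barrier) = (Arc::clone(&work_rx), Arc::clone(&barrier));
            std::thread::spawn(move || loop {
                // Park until the coordinator queues another round; a
                // disconnected channel is the shutdown signal.
                if work_rx.lock().unwrap().recv().is_err() {
                    break;
                }
                barrier.wait(); // round starts
                work(); // stand-in for `execute_all(input, &manager)`
                barrier.wait(); // round ends
            })
        })
        .collect();

    for _ in 0..rounds {
        // analogous to the `iter_batched` setup closure
        for _ in 0..threads {
            work_tx.send(()).unwrap();
        }
        barrier.wait(); // release the workers
        barrier.wait(); // wait for them all to finish
    }

    drop(work_tx); // disconnects the channel; workers exit their loops
    handles.into_iter().for_each(|h| h.join().unwrap());
}

fn work() {
    std::thread::yield_now();
}
```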

struct JoinOnDrop(Vec<std::thread::JoinHandle<()>>);
@@ -121,7 +152,10 @@ impl Drop for JoinOnDrop {
}
}
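
The body of `Drop for JoinOnDrop` is collapsed in the diff view above. A join-on-drop guard of this shape typically drains and joins every handle so worker panics surface on the benchmarking thread — a sketch under that assumption, not necessarily the PR's exact code:

```rust
impl Drop for JoinOnDrop {
    fn drop(&mut self) {
        for handle in self.0.drain(..) {
            // Propagate any worker panic instead of losing it silently.
            handle.join().expect("walredo requester thread panicked");
        }
    }
}
```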

-fn execute_all(input: Vec<Request>, manager: &PostgresRedoManager) -> Result<(), WalRedoError> {
+fn execute_all<I>(input: I, manager: &PostgresRedoManager) -> Result<(), WalRedoError>
+where
+    I: IntoIterator<Item = Request>,
+{
// just fire all requests as fast as possible
input.into_iter().try_for_each(|req| {
let page = req.execute(manager)?;
@@ -143,6 +177,7 @@ macro_rules! lsn {
}};
}

/// Short payload, 1132 bytes.
// pg_records are copypasted from log, where they are put with Debug impl of Bytes, which uses \0
// for null bytes.
#[allow(clippy::octal_escapes)]
@@ -172,6 +207,7 @@ fn short() -> Request {
}
}

/// Medium-sized payload, serializes as 26393 bytes.
// see [`short`]
#[allow(clippy::octal_escapes)]
fn medium() -> Request {
Binary file added pageserver/fixtures/short_v14_redo.page
107 changes: 107 additions & 0 deletions pageserver/src/walredo.rs
@@ -1010,3 +1010,110 @@ fn build_get_page_msg(tag: BufferTag, buf: &mut Vec<u8>) {
tag.ser_into(buf)
.expect("serialize BufferTag should always succeed");
}

#[cfg(test)]
mod tests {
use super::{PostgresRedoManager, WalRedoManager};
use crate::repository::Key;
use crate::{config::PageServerConf, walrecord::NeonWalRecord};
use bytes::Bytes;
use std::str::FromStr;
use utils::{id::TenantId, lsn::Lsn};

#[test]
fn short_v14_redo() {
let expected = std::fs::read("fixtures/short_v14_redo.page").unwrap();

let h = RedoHarness::new().unwrap();

let page = h
.manager
.request_redo(
Key {
field1: 0,
field2: 1663,
field3: 13010,
field4: 1259,
field5: 0,
field6: 0,
},
Lsn::from_str("0/16E2408").unwrap(),
None,
short_records(),
14,
)
.unwrap();

assert_eq!(&expected, &*page);
}
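
This test compares the redo output byte-for-byte against the fixture checked in by this PR (`pageserver/fixtures/short_v14_redo.page`). If the fixture ever needs refreshing, a hypothetical ignored test could rewrite it with the same harness — a sketch, not part of the PR:

```rust
#[test]
#[ignore] // hypothetical helper: run manually to regenerate the fixture
fn regenerate_short_v14_fixture() {
    let h = RedoHarness::new().unwrap();
    let page = h
        .manager
        .request_redo(
            Key {
                field1: 0,
                field2: 1663,
                field3: 13010,
                field4: 1259,
                field5: 0,
                field6: 0,
            },
            Lsn::from_str("0/16E2408").unwrap(),
            None,
            short_records(),
            14,
        )
        .unwrap();
    std::fs::write("fixtures/short_v14_redo.page", &page).unwrap();
}
```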

#[test]
fn short_v14_fails_for_wrong_key_but_returns_zero_page() {
let h = RedoHarness::new().unwrap();

let page = h
.manager
.request_redo(
Key {
field1: 0,
field2: 1663,
// key should be 13010
field3: 13130,
field4: 1259,
field5: 0,
field6: 0,
},
Lsn::from_str("0/16E2408").unwrap(),
None,
short_records(),
14,
)
.unwrap();

// TODO: there will be some stderr printout, which is forwarded to tracing and could
// perhaps be captured, as long as it happens on the same thread.
assert_eq!(page, crate::ZERO_PAGE);
}

#[allow(clippy::octal_escapes)]
fn short_records() -> Vec<(Lsn, NeonWalRecord)> {
vec![
(
Lsn::from_str("0/16A9388").unwrap(),
NeonWalRecord::Postgres {
will_init: true,
rec: Bytes::from_static(b"j\x03\0\0\0\x04\0\0\xe8\x7fj\x01\0\0\0\0\0\n\0\0\xd0\x16\x13Y\0\x10\0\04\x03\xd4\0\x05\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x03\0\0\0\0\x80\xeca\x01\0\0\x01\0\xd4\0\xa0\x1d\0 \x04 \0\0\0\0/\0\x01\0\xa0\x9dX\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\00\x9f\x9a\x01P\x9e\xb2\x01\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x02\0!\0\x01\x08 \xff\xff\xff?\0\0\0\0\0\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\0\0\0\0\0\0\x80\xbf\0\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\0\0\0\0\x0c\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0/\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0\xdf\x04\0\0pg_type\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0G\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0@\x16D\x0e\0\0\0K\x10\0\0\x01\0pr \0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0[\x01\0\0\0\0\0\0\0\t\x04\0\0\x02\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0\0\0C\x01\0\0\x15\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0.\0!\x80\x03+ \xff\xff\xff\x7f\0\0\0\0\0;\n\0\0pg_statistic\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xfd.\0\0\0\0\0\0\n\0\0\0\x02\0\0\0;\n\0\0\0\0\0\0\x13\0\0\0\0\0\xcbC\x13\0\0\0\x18\x0b\0\0\x01\0pr\x1f\0\0\0\0\0\0\0\0\x01n\0\0\0\0\0\xd6\x02\0\0\x01\0\0\0C\x01\0\0\0\0\0\0\0\t\x04\0\0\x01\0\0\0\x01\0\0\0\n\0\0\0\n\0\0\0\x7f\0\0\0\0\0\0\x02\0\x01")
}
),
(
Lsn::from_str("0/16D4080").unwrap(),
NeonWalRecord::Postgres {
will_init: false,
rec: Bytes::from_static(b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0")
}
)
]
}
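
The `#[allow(clippy::octal_escapes)]` is needed because Rust byte strings have no octal escapes: `\0` followed by a digit is a NUL byte plus that literal digit, which the lint flags as a likely mistake. A tiny illustration with a hypothetical constant:

```rust
// `\05` is NUL followed by ASCII '5', not octal 0o5; clippy warns that it
// reads like an octal escape, hence the allow on these literals.
#[allow(clippy::octal_escapes)]
const EXAMPLE: &[u8] = b"\05";
const SAME: &[u8] = &[0x00, b'5'];
```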

struct RedoHarness {
// underscored because unused, except for removal at drop
_repo_dir: tempfile::TempDir,
manager: PostgresRedoManager,
}

impl RedoHarness {
fn new() -> anyhow::Result<Self> {
let repo_dir = tempfile::tempdir()?;
let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf());
let conf = Box::leak(Box::new(conf));
let tenant_id = TenantId::generate();

let manager = PostgresRedoManager::new(conf, tenant_id);

Ok(RedoHarness {
_repo_dir: repo_dir,
manager,
})
}
}
}
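
For reference: assuming the standard cargo target names implied by the paths above, the new tests run with `cargo test -p pageserver walredo` and the updated benchmark with `cargo bench -p pageserver --bench bench_walredo`.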