/
read_to_end.rs
119 lines (103 loc) · 3.42 KB
/
read_to_end.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
use crate::io::{AsyncRead, ReadBuf};
use std::future::Future;
use std::io;
use std::mem::{self, MaybeUninit};
use std::pin::Pin;
use std::task::{Context, Poll};
/// Future that reads all bytes from an `AsyncRead` reader into a `Vec<u8>`
/// until EOF, appending to whatever the buffer already holds.
///
/// Created by [`read_to_end`]; resolves to the number of bytes appended.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
pub struct ReadToEnd<'a, R: ?Sized> {
    /// The reader being drained.
    reader: &'a mut R,
    /// Destination buffer; new bytes are appended after its existing contents.
    buf: &'a mut Vec<u8>,
    /// The number of bytes appended to buf. This can be less than buf.len() if
    /// the buffer was not empty when the operation was started.
    read: usize,
}
/// Creates a [`ReadToEnd`] future that drains `reader` into `buffer`.
///
/// The byte counter starts at zero; bytes already in `buffer` are kept and
/// not counted toward the future's output.
pub(crate) fn read_to_end<'a, R>(reader: &'a mut R, buffer: &'a mut Vec<u8>) -> ReadToEnd<'a, R>
where
    R: AsyncRead + Unpin + ?Sized,
{
    ReadToEnd {
        read: 0,
        reader,
        buf: buffer,
    }
}
/// Repeatedly polls single reads until EOF, an error, or the reader pends.
///
/// On EOF (a zero-byte read) the accumulated count is returned and the
/// caller's counter is reset to zero so the state can be reused.
pub(super) fn read_to_end_internal<R: AsyncRead + ?Sized>(
    buf: &mut Vec<u8>,
    mut reader: Pin<&mut R>,
    num_read: &mut usize,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    loop {
        match ready!(poll_read_to_end(buf, reader.as_mut(), cx)) {
            Err(err) => return Poll::Ready(Err(err)),
            // A zero-byte read signals EOF: report the running total and
            // leave the counter zeroed for any subsequent use.
            Ok(0) => {
                let total = mem::replace(num_read, 0);
                return Poll::Ready(Ok(total));
            }
            Ok(num) => *num_read += num,
        }
    }
}
/// Tries to read from the provided AsyncRead.
///
/// The length of the buffer is increased by the number of bytes read.
/// Returns `Ready(Ok(n))` with the number of bytes appended (0 means EOF),
/// `Ready(Err(_))` on reader failure, or `Pending` if the reader pends.
fn poll_read_to_end<R: AsyncRead + ?Sized>(
    buf: &mut Vec<u8>,
    read: Pin<&mut R>,
    cx: &mut Context<'_>,
) -> Poll<io::Result<usize>> {
    // This uses an adaptive system to extend the vector when it fills. We want to
    // avoid paying to allocate and zero a huge chunk of memory if the reader only
    // has 4 bytes while still making large reads if the reader does have a ton
    // of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
    // time is 4,500 times (!) slower than this if the reader has a very small
    // amount of data to return.
    //
    // Ensure at least a small amount of spare capacity; `Vec`'s own doubling
    // growth then supplies the adaptive behavior described above.
    reserve(buf, 32);
    // Hand the uninitialized tail of the vector to the reader via `ReadBuf`
    // so it can be filled without zeroing it first.
    let mut unused_capacity = ReadBuf::uninit(get_unused_capacity(buf));
    ready!(read.poll_read(cx, &mut unused_capacity))?;
    // Bytes the reader reported as filled (and therefore initialized).
    let n = unused_capacity.filled().len();
    let new_len = buf.len() + n;
    // This should no longer even be possible in safe Rust. An implementor
    // would need to have unsafely *replaced* the buffer inside `ReadBuf`,
    // which... yolo?
    assert!(new_len <= buf.capacity());
    // SAFETY: the assert above proves `new_len` is within capacity, and the
    // `ReadBuf` contract guarantees the first `n` bytes of the spare
    // capacity were initialized by the reader.
    unsafe {
        buf.set_len(new_len);
    }
    Poll::Ready(Ok(n))
}
/// Ensures `buf` has at least `bytes` of spare capacity for the next read,
/// growing the allocation only when the existing slack is insufficient.
fn reserve(buf: &mut Vec<u8>, bytes: usize) {
    let spare = buf.capacity() - buf.len();
    if spare < bytes {
        buf.reserve(bytes);
    }
}
/// Returns the unused capacity of the provided vector.
///
/// Delegates to `bytes::BufMut::bytes_mut`, which exposes the vector's spare
/// capacity as a `&mut [MaybeUninit<u8>]` without initializing it.
/// NOTE(review): some versions of the `bytes` crate grow an at-capacity `Vec`
/// inside `bytes_mut`; callers here invoke `reserve` first so spare capacity
/// already exists — confirm against the pinned `bytes` version.
fn get_unused_capacity(buf: &mut Vec<u8>) -> &mut [MaybeUninit<u8>] {
    bytes::BufMut::bytes_mut(buf)
}
impl<A> Future for ReadToEnd<'_, A>
where
    A: AsyncRead + ?Sized + Unpin,
{
    type Output = io::Result<usize>;

    /// Drives the read loop, resolving to the number of bytes appended.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `ReadToEnd` only holds mutable references and a counter, so it is
        // `Unpin` and the pin can be safely unwrapped.
        let me = self.get_mut();
        read_to_end_internal(&mut *me.buf, Pin::new(&mut *me.reader), &mut me.read, cx)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Compile-time check: `ReadToEnd` must remain `Unpin` even when the
    // reader type itself is `!Unpin` (`PhantomPinned`), since it only stores
    // a mutable reference to the reader, not the reader itself.
    #[test]
    fn assert_unpin() {
        use std::marker::PhantomPinned;
        crate::is_unpin::<ReadToEnd<'_, PhantomPinned>>();
    }
}