diff --git a/.circleci/config.yml b/.circleci/config.yml
index e25909b7..d69a1d2d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -12,7 +12,7 @@ jobs:
       - run: docker run -h test.example.com --volumes-from certs -d --privileged --name test-docker-daemon docker:stable-dind --storage-driver=overlay --tlsverify --tlscacert=/certs/ca.pem --tlscert=/certs/cert.pem --tlskey=/certs/key.pem
       - run: docker run --rm --volumes-from certs --privileged --rm --entrypoint=chmod docker:stable-dind 644 /certs/key.pem /certs/ca-key.pem
       - run: docker build -t bollard .
-      - run: docker run -ti -e DOCKER_CERT_PATH=/certs -e DOCKER_HOST='tcp://test.example.com:2376' --volumes-from certs --rm --link test-docker-daemon:docker bollard cargo test --features test_ssl -- --test test_version_ssl
+      - run: docker run -ti -e DOCKER_CERT_PATH=/certs -e DOCKER_HOST='tcp://test.example.com:2376' --volumes-from certs --rm --link test-docker-daemon:docker bollard cargo test --features test_ssl,ct_logs -- --test test_version_ssl
   test_http:
     docker:
       - image: docker:20.10.11
diff --git a/Cargo.toml b/Cargo.toml
index 80395602..beac7f50 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,25 +19,27 @@ test_ssl = ["ssl"]
 # Enable tests specifically for macos
 test_macos = []
 # Enable rustls / ssl
-ssl = ["hyper-rustls", "rustls", "rustls-native-certs", "webpki-roots", "ct-logs"]
+ssl = ["hyper-rustls", "rustls", "rustls-native-certs", "webpki-roots"]
+ct_logs = ["ssl", "ct-logs"]
 
 [dependencies]
 base64 = "0.13"
 bollard-stubs = { version = "1.41.0" }
 bytes = "1"
 chrono = { version = "0.4", features = ["serde"] }
-ct-logs = { version = "0.8.0", optional = true }
+ct-logs = { version = "0.9.0", optional = true }
 dirs-next = "2.0"
 futures-core = "0.3"
 futures-util = "0.3"
 hex = "0.4.2"
 http = "0.2"
 hyper = { version = "0.14", features = ["client", "tcp", "http1", "http2", "stream"] }
-hyper-rustls = { version = "0.22", optional = true }
+hyper-rustls = { version = "0.23", optional = true }
 log = "0.4"
 pin-project = "1.0.2"
-rustls = { version = "0.19", optional = true }
-rustls-native-certs = { version = "0.5.0", optional = true }
+rustls = { version = "0.20", optional = true }
+rustls-native-certs = { version = "0.6.0", optional = true }
+rustls-pemfile = "0.2"
 serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
@@ -46,10 +48,11 @@ tokio = { version = "1.7", features = ["time", "fs", "net", "rt", "rt-multi-thre
 thiserror = "1.0"
 tokio-util = { version = "0.6", features = ["codec"] }
 url = "2.2"
-webpki-roots = { version = "0.21", optional = true }
+webpki-roots = { version = "0.22", optional = true }
+webpki = "0.22"
 
 [dev-dependencies]
-env_logger = "0.8"
+env_logger = "0.9"
 flate2 = "1.0"
 tar = "0.4"
 tokio = { version = "1.7", features = ["time", "fs", "net", "rt", "rt-multi-thread", "macros", "io-std"] }
diff --git a/examples/build.rs b/examples/build.rs
index 300c8c22..82584d37 100644
--- a/examples/build.rs
+++ b/examples/build.rs
@@ -43,8 +43,7 @@ async fn main() {
         platform: "linux/x86_64",
     };
 
-    let mut image_build_stream = docker
-        .build_image(build_image_options, None, None);
+    let mut image_build_stream = docker.build_image(build_image_options, None, None);
 
     while let Some(msg) = image_build_stream.next().await {
         println!("Message: {:?}", msg);
diff --git a/examples/hoover.rs b/examples/hoover.rs
index 07c519c8..a20e4d74 100644
--- a/examples/hoover.rs
+++ b/examples/hoover.rs
@@ -1,7 +1,10 @@
 //! Removes old docker containers, images, volumes and networks
 
-use bollard::{container::PruneContainersOptions, image::PruneImagesOptions, network::PruneNetworksOptions, volume::PruneVolumesOptions};
 use bollard::Docker;
+use bollard::{
+    container::PruneContainersOptions, image::PruneImagesOptions, network::PruneNetworksOptions,
+    volume::PruneVolumesOptions,
+};
 
 use chrono::{Duration, Utc};
 use std::collections::HashMap;
@@ -18,25 +21,33 @@ async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
     let mut prune_filters = HashMap::new();
     prune_filters.insert("until", vec![timestamp]);
 
-    let prune = docker.prune_containers(Some(PruneContainersOptions {
-        filters: prune_filters.clone()
-    })).await?;
+    let prune = docker
+        .prune_containers(Some(PruneContainersOptions {
+            filters: prune_filters.clone(),
+        }))
+        .await?;
 
     println!("{:?}", prune);
 
-    let prune = docker.prune_images(Some(PruneImagesOptions {
-        filters: prune_filters.clone()
-    })).await?;
+    let prune = docker
+        .prune_images(Some(PruneImagesOptions {
+            filters: prune_filters.clone(),
+        }))
+        .await?;
 
     println!("{:?}", prune);
 
-    let prune = docker.prune_volumes(None::<PruneVolumesOptions<String>>).await?;
+    let prune = docker
+        .prune_volumes(None::<PruneVolumesOptions<String>>)
+        .await?;
 
     println!("{:?}", prune);
 
-    let prune = docker.prune_networks(Some(PruneNetworksOptions {
-        filters: prune_filters.clone()
-    })).await?;
+    let prune = docker
+        .prune_networks(Some(PruneNetworksOptions {
+            filters: prune_filters.clone(),
+        }))
+        .await?;
 
     println!("{:?}", prune);
diff --git a/examples/post_dockerfile.rs b/examples/post_dockerfile.rs
index 952bc068..097fc24b 100644
--- a/examples/post_dockerfile.rs
+++ b/examples/post_dockerfile.rs
@@ -1,4 +1,4 @@
-//! Post a dockerfile 
+//! Post a dockerfile
 //!
 //! tar cvf dockerfile.tar Dockerfile
 
@@ -22,7 +22,7 @@
         rm: true,
         ..Default::default()
     };
-    
+
     let filename = &args().nth(1).expect("needs first argument");
     let archive = File::open(filename).await.expect("could not open file");
     let stream = FramedRead::new(archive, BytesCodec::new());
diff --git a/src/container.rs b/src/container.rs
index 92d0e329..c5ff50ad 100644
--- a/src/container.rs
+++ b/src/container.rs
@@ -562,8 +562,8 @@ pub struct MemoryStatsStatsV1 {
     pub total_pgmajfault: u64,
    pub total_pgpgin: u64,
     pub hierarchical_memsw_limit: Option<u64>, // only on OSX
-    pub shmem: Option<u64>, // only on linux kernel > 4.15.0-1106
-    pub total_shmem: Option<u64>, // only on linux kernel > 4.15.0-1106
+    pub shmem: Option<u64>,       // only on linux kernel > 4.15.0-1106
+    pub total_shmem: Option<u64>, // only on linux kernel > 4.15.0-1106
 }
 
 /// Granular memory statistics for the container, v2 cgroups.
diff --git a/src/docker.rs b/src/docker.rs
index 14cf7c0c..ed2ec0ff 100644
--- a/src/docker.rs
+++ b/src/docker.rs
@@ -12,6 +12,8 @@ use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
 use std::sync::Arc;
 use std::time::Duration;
+#[cfg(feature = "ct_logs")]
+use std::time::SystemTime;
 
 use chrono::{DateTime, Utc};
 use futures_core::Stream;
@@ -27,9 +29,7 @@ use hyper_rustls::HttpsConnector;
 #[cfg(unix)]
 use hyperlocal::UnixConnector;
 #[cfg(feature = "ssl")]
-use rustls::internal::pemfile;
-#[cfg(feature = "ssl")]
-use rustls::sign::{CertifiedKey, RSASigningKey};
+use rustls::sign::{CertifiedKey, RsaSigningKey};
 use tokio::io::{split, AsyncRead, AsyncWrite};
 use tokio_util::codec::FramedRead;
 
@@ -64,6 +64,10 @@ pub const API_DEFAULT_VERSION: &ClientVersion = &ClientVersion {
     minor_version: 40,
 };
 
+/// 2 years from ct_logs 0.9 release
+#[cfg(feature = "ct_logs")]
+const TIMESTAMP_CT_LOGS_EXPIRY: u64 = 1681908462;
+
 #[derive(Debug, Clone)]
 pub(crate) enum ClientType {
     #[cfg(unix)]
@@ -243,7 +247,7 @@ impl DockerClientCertResolver {
         if let Ok(ref path) = from_env {
             Ok(Path::new(path).to_owned())
         } else {
-            let home = dirs_next::home_dir().ok_or_else(|| NoCertPathError)?;
+            let home = dirs_next::home_dir().ok_or_else(|| NoHomePathError)?;
             Ok(home.join(".docker"))
         }
     }
@@ -253,23 +257,33 @@
     }
 
     fn certs(path: &Path) -> Result<Vec<rustls::Certificate>, Error> {
-        Ok(
-            pemfile::certs(&mut Self::open_buffered(path)?).map_err(|_| CertPathError {
+        Ok(rustls_pemfile::certs(&mut Self::open_buffered(path)?)
+            .map_err(|_| CertPathError {
                 path: path.to_path_buf(),
-            })?,
-        )
+            })?
+            .iter()
+            .map(|v| rustls::Certificate(v.clone()))
+            .collect())
     }
 
     fn keys(path: &Path) -> Result<Vec<rustls::PrivateKey>, Error> {
         let mut rdr = Self::open_buffered(path)?;
-        let keys = pemfile::rsa_private_keys(&mut rdr).map_err(|_| CertPathError {
-            path: path.to_path_buf(),
-        })?;
+        let mut keys = vec![];
+        loop {
+            match rustls_pemfile::read_one(&mut rdr).map_err(|_| CertPathError {
+                path: path.to_path_buf(),
+            })? {
+                Some(rustls_pemfile::Item::RSAKey(key)) => keys.push(rustls::PrivateKey(key)),
+                Some(rustls_pemfile::Item::PKCS8Key(key)) => keys.push(rustls::PrivateKey(key)),
+                None => break,
+                _ => {}
+            }
+        }
 
         Ok(keys)
     }
 
-    fn docker_client_key(&self) -> Result<CertifiedKey, Error> {
+    fn docker_client_key(&self) -> Result<Arc<CertifiedKey>, Error> {
         let all_certs = Self::certs(&self.ssl_cert)?;
         let mut all_keys = Self::keys(&self.ssl_key)?;
 
@@ -282,20 +296,20 @@
             });
         };
 
-        let signing_key = RSASigningKey::new(&key).map_err(|_| CertParseError {
+        let signing_key = RsaSigningKey::new(&key).map_err(|_| CertParseError {
             path: self.ssl_key.to_owned(),
         })?;
 
-        Ok(CertifiedKey::new(
+        Ok(Arc::new(CertifiedKey::new(
             all_certs,
-            Arc::new(Box::new(signing_key)),
-        ))
+            Arc::new(signing_key),
+        )))
     }
 }
 
 #[cfg(feature = "ssl")]
-impl rustls::ResolvesClientCert for DockerClientCertResolver {
-    fn resolve(&self, _: &[&[u8]], _: &[rustls::SignatureScheme]) -> Option<CertifiedKey> {
+impl rustls::client::ResolvesClientCert for DockerClientCertResolver {
+    fn resolve(&self, _: &[&[u8]], _: &[rustls::SignatureScheme]) -> Option<Arc<CertifiedKey>> {
         self.docker_client_key().ok()
     }
 
@@ -332,25 +346,18 @@
     /// ```
     pub fn connect_with_ssl_defaults() -> Result<Docker, Error> {
         let cert_path = DockerClientCertResolver::default_cert_path()?;
-        if let Ok(ref host) = env::var("DOCKER_HOST") {
-            Docker::connect_with_ssl(
-                host,
-                &cert_path.join("key.pem"),
-                &cert_path.join("cert.pem"),
-                &cert_path.join("ca.pem"),
-                DEFAULT_TIMEOUT,
-                API_DEFAULT_VERSION,
-            )
-        } else {
-            Docker::connect_with_ssl(
-                DEFAULT_DOCKER_HOST,
-                &cert_path.join("key.pem"),
-                &cert_path.join("cert.pem"),
-                &cert_path.join("ca.pem"),
-                DEFAULT_TIMEOUT,
-                API_DEFAULT_VERSION,
-            )
-        }
+        Docker::connect_with_ssl(
+            if let Ok(ref host) = env::var("DOCKER_HOST") {
+                host
+            } else {
+                DEFAULT_DOCKER_HOST
+            },
+            &cert_path.join("key.pem"),
+            &cert_path.join("cert.pem"),
+            &cert_path.join("ca.pem"),
+            DEFAULT_TIMEOUT,
+            API_DEFAULT_VERSION,
+        )
     }
 
     /// Connect using secure HTTPS.
@@ -394,38 +401,50 @@
         // This ensures that using docker-machine-esque addresses work with Hyper.
         let client_addr = addr.replacen("tcp://", "", 1).replacen("https://", "", 1);
 
-        let mut config = rustls::ClientConfig::new();
-        config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
-        config.ct_logs = Some(&ct_logs::LOGS);
+        let mut root_store = rustls::RootCertStore::empty();
+        for cert in rustls_native_certs::load_native_certs()? {
+            root_store.add(&rustls::Certificate(cert.0)).map_err(|err| NoNativeCertsError{ err })?;
+        }
 
-        config.root_store = match rustls_native_certs::load_native_certs() {
-            Ok(store) => store,
-            Err((Some(store), err)) => {
-                warn!("could not load all certificates: {}", err);
-                store
-            }
-            Err((None, err)) => {
-                warn!("cannot access native certificate store: {}", err);
-                config.root_store
-            }
-        };
+        root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| {
+            rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
+                ta.subject,
+                ta.spki,
+                ta.name_constraints,
+            )
+        }));
 
-        let mut ca_pem = io::Cursor::new(fs::read(ssl_ca)?);
+        let mut ca_pem = io::Cursor::new(fs::read(ssl_ca).map_err(|_| CertPathError {
+            path: ssl_ca.to_owned(),
+        })?);
 
-        config
-            .root_store
-            .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
-        config
-            .root_store
-            .add_pem_file(&mut ca_pem)
-            .map_err(|_| CertParseError {
+        root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut ca_pem).map_err(
+            |_| CertParseError {
                 path: ssl_ca.to_owned(),
-            })?;
-
-        config.client_auth_cert_resolver = Arc::new(DockerClientCertResolver {
-            ssl_key: ssl_key.to_owned(),
-            ssl_cert: ssl_cert.to_owned(),
-        });
+            },
+        )?);
+
+        #[cfg(feature = "ct_logs")]
+        let config = {
+            let ct_logs_expiry =
+                SystemTime::UNIX_EPOCH + Duration::from_secs(TIMESTAMP_CT_LOGS_EXPIRY);
+            rustls::ClientConfig::builder()
+                .with_safe_defaults()
+                .with_root_certificates(root_store)
+                .with_certificate_transparency_logs(&ct_logs::LOGS, ct_logs_expiry)
+                .with_client_cert_resolver(Arc::new(DockerClientCertResolver {
+                    ssl_key: ssl_key.to_owned(),
+                    ssl_cert: ssl_cert.to_owned(),
+                }))
+        };
+        #[cfg(not(feature = "ct_logs"))]
+        let config = rustls::ClientConfig::builder()
+            .with_safe_defaults()
+            .with_root_certificates(root_store)
+            .with_client_cert_resolver(Arc::new(DockerClientCertResolver {
+                ssl_key: ssl_key.to_owned(),
+                ssl_cert: ssl_cert.to_owned(),
+            }));
 
         let mut http_connector = HttpConnector::new();
         http_connector.enforce_http(false);
@@ -572,12 +591,16 @@
     /// let connection = Docker::connect_with_socket("/var/run/docker.sock", 120, API_DEFAULT_VERSION).unwrap();
     /// connection.ping().map_ok(|_| Ok::<_, ()>(println!("Connected!")));
     /// ```
-    pub fn connect_with_socket(path: &str, timeout: u64, client_version: &ClientVersion) -> Result<Docker, Error> {
+    pub fn connect_with_socket(
+        path: &str,
+        timeout: u64,
+        client_version: &ClientVersion,
+    ) -> Result<Docker, Error> {
         #[cfg(unix)]
         let docker = Docker::connect_with_unix(path, timeout, client_version);
         #[cfg(windows)]
         let docker = Docker::connect_with_named_pipe(path, timeout, client_version);
-        
+
         docker
     }
 }
diff --git a/src/errors.rs b/src/errors.rs
index 6bbec0a1..39ffddcd 100644
--- a/src/errors.rs
+++ b/src/errors.rs
@@ -6,8 +6,8 @@ use std::path::PathBuf;
 pub enum Error {
     /// Error emitted during client instantiation when the `DOCKER_CERT_PATH` environment variable
     /// is invalid.
-    #[error("Could not find DOCKER_CERT_PATH")]
-    NoCertPathError,
+    #[error("Could not find home directory")]
+    NoHomePathError,
     /// Generic error when reading a certificate from the filesystem
     #[error("Cannot open/read certificate with path: {path}")]
     CertPathError {
@@ -28,6 +28,12 @@ pub enum Error {
         /// Path for the failing certificate file
         path: PathBuf,
     },
+    /// Error emitted when the client is unable to load native certs for SSL
+    #[error("Could not load native certs")]
+    NoNativeCertsError {
+        /// The original error emitted.
+        err: webpki::Error,
+    },
     /// Error emitted by the docker server, when it responds with a 404.
     #[error("API responded with a 404 not found: {message}")]
     DockerResponseNotFoundError {
diff --git a/src/lib.rs b/src/lib.rs
index 378391bf..ef25f076 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -28,7 +28,7 @@
 //! [API docs](https://docs.rs/bollard/).
 //!
 //! Version 0.11 re-enables Windows Named Pipe support.
-//! 
+//!
 //! As of version 0.6, this project now generates API stubs from the upstream Docker-maintained
 //! [Swagger OpenAPI specification](https://docs.docker.com/engine/api/v1.41.yaml). The generated
 //! models are committed to this repository, but packaged in a separate crate
diff --git a/src/named_pipe.rs b/src/named_pipe.rs
index d2e65fce..346ab26a 100644
--- a/src/named_pipe.rs
+++ b/src/named_pipe.rs
@@ -19,12 +19,10 @@ use winapi::shared::winerror;
 use crate::docker::ClientType;
 use crate::uri::Uri;
 
-
-
 #[pin_project]
 pub struct NamedPipeStream {
     #[pin]
-    io: NamedPipeClient
+    io: NamedPipeClient,
 }
 
 impl NamedPipeStream {
@@ -44,7 +42,7 @@
             time::sleep(Duration::from_millis(50)).await;
         };
 
-        Ok(NamedPipeStream{ io: client })
+        Ok(NamedPipeStream { io: client })
     }
 }
 
@@ -101,23 +99,19 @@ impl hyper::service::Service<hyper::Uri> for NamedPipeConnector {
         let fut = async move {
             match destination.scheme() {
                 Some(scheme) if scheme == NAMED_PIPE_SCHEME => Ok(()),
-                _ => {
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidInput,
-                        format!("Invalid scheme {:?}", destination.scheme()),
-                    ))
-                }
+                _ => Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    format!("Invalid scheme {:?}", destination.scheme()),
+                )),
             }?;
 
             match Uri::socket_path_dest(&destination, &ClientType::NamedPipe) {
                 Some(path) => Ok(NamedPipeStream::connect(&path).await?),
-                None => {
-                    Err(io::Error::new(
-                        io::ErrorKind::InvalidInput,
-                        format!("Invalid uri {:?}", destination),
-                    ))
-                }
+                None => Err(io::Error::new(
+                    io::ErrorKind::InvalidInput,
+                    format!("Invalid uri {:?}", destination),
+                )),
             }
         };
 
diff --git a/src/read.rs b/src/read.rs
index 6e625dd0..ce24fbb5 100644
--- a/src/read.rs
+++ b/src/read.rs
@@ -6,11 +6,7 @@ use serde::de::DeserializeOwned;
 use std::pin::Pin;
 use std::string::String;
 use std::task::{Context, Poll};
-use std::{
-    cmp,
-    io,
-    marker::PhantomData,
-};
+use std::{cmp, io, marker::PhantomData};
 use tokio::io::{AsyncRead, ReadBuf};
 use tokio_util::codec::Decoder;
 
@@ -263,7 +259,7 @@
         assert_eq!(codec.decode(&mut buf).unwrap(), None);
         assert!(buf.is_empty());
     }
-    
+
     #[test]
     fn json_partial_decode() {
         let mut buf = BytesMut::from(&b"{}\n{}\n\n{"[..]);
@@ -278,7 +274,7 @@
         assert_eq!(codec.decode(&mut buf).unwrap(), Some(HashMap::new()));
         assert!(buf.is_empty());
     }
-    
+
     #[test]
     fn json_decode_lacking_newline() {
         env_logger::try_init().unwrap();