Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for load-shedding. #1616

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion tonic/Cargo.toml
Expand Up @@ -73,7 +73,7 @@ h2 = {version = "0.3.17", optional = true}
hyper = {version = "0.14.26", features = ["full"], optional = true}
hyper-timeout = {version = "0.4", optional = true}
tokio-stream = "0.1"
tower = {version = "0.4.7", default-features = false, features = ["balance", "buffer", "discover", "limit", "load", "make", "timeout", "util"], optional = true}
tower = {version = "0.4.7", default-features = false, features = ["balance", "buffer", "discover", "limit", "load", "make", "timeout", "util", "load-shed"], optional = true}
axum = {version = "0.6.9", default_features = false, optional = true}

# rustls
Expand Down
11 changes: 11 additions & 0 deletions tonic/src/status.rs
Expand Up @@ -350,6 +350,17 @@ impl Status {
Err(err) => err,
};

// If the load shed middleware is enabled, respond to
// service overloaded with an appropriate grpc status.
let err = match err.downcast::<tower::load_shed::error::Overloaded>() {
Ok(_) => {
return Ok(Status::resource_exhausted(
"Too many active requests for the connection",
));
}
Err(err) => err,
};

if let Some(mut status) = find_status_in_source_chain(&*err) {
status.source = Some(err.into());
return Ok(status);
Expand Down
33 changes: 33 additions & 0 deletions tonic/src/transport/server/mod.rs
Expand Up @@ -59,6 +59,7 @@ use tower::{
layer::util::{Identity, Stack},
layer::Layer,
limit::concurrency::ConcurrencyLimitLayer,
load_shed::LoadShedLayer,
util::Either,
Service, ServiceBuilder,
};
Expand All @@ -81,6 +82,7 @@ const DEFAULT_HTTP2_KEEPALIVE_TIMEOUT_SECS: u64 = 20;
pub struct Server<L = Identity> {
trace_interceptor: Option<TraceInterceptor>,
concurrency_limit: Option<usize>,
load_shed: bool,
timeout: Option<Duration>,
#[cfg(feature = "tls")]
tls: Option<TlsAcceptor>,
Expand All @@ -103,6 +105,7 @@ impl Default for Server<Identity> {
Self {
trace_interceptor: None,
concurrency_limit: None,
load_shed: false,
timeout: None,
#[cfg(feature = "tls")]
tls: None,
Expand Down Expand Up @@ -173,6 +176,27 @@ impl<L> Server<L> {
}
}

/// Enable or disable load shedding. The default is disabled.
///
/// When load shedding is enabled, if the service responds with not ready
/// the request will immediately be rejected with a
/// [`resource_exhausted`](https://docs.rs/tonic/latest/tonic/struct.Status.html#method.resource_exhausted) error.
/// The default is to buffer requests. This is especially useful in combination with
/// setting a concurrency limit per connection.
///
/// # Example
///
/// ```
/// # use tonic::transport::Server;
/// # use tower_service::Service;
/// # let builder = Server::builder();
/// builder.load_shed(true);
/// ```
#[must_use]
pub fn load_shed(self, load_shed: bool) -> Self {
Server { load_shed, ..self }
}

/// Set a timeout for all request handlers.
///
/// # Example
Expand Down Expand Up @@ -469,6 +493,7 @@ impl<L> Server<L> {
service_builder: self.service_builder.layer(new_layer),
trace_interceptor: self.trace_interceptor,
concurrency_limit: self.concurrency_limit,
load_shed: self.load_shed,
timeout: self.timeout,
#[cfg(feature = "tls")]
tls: self.tls,
Expand Down Expand Up @@ -507,6 +532,7 @@ impl<L> Server<L> {
{
let trace_interceptor = self.trace_interceptor.clone();
let concurrency_limit = self.concurrency_limit;
let load_shed = self.load_shed;
let init_connection_window_size = self.init_connection_window_size;
let init_stream_window_size = self.init_stream_window_size;
let max_concurrent_streams = self.max_concurrent_streams;
Expand All @@ -529,6 +555,7 @@ impl<L> Server<L> {
let svc = MakeSvc {
inner: svc,
concurrency_limit,
load_shed,
timeout,
trace_interceptor,
_io: PhantomData,
Expand Down Expand Up @@ -815,6 +842,7 @@ impl<S> fmt::Debug for Svc<S> {

struct MakeSvc<S, IO> {
concurrency_limit: Option<usize>,
load_shed: bool,
timeout: Option<Duration>,
inner: S,
trace_interceptor: Option<TraceInterceptor>,
Expand Down Expand Up @@ -848,6 +876,11 @@ where

let svc = ServiceBuilder::new()
.layer_fn(RecoverError::new)
.option_layer(if self.load_shed {
Some(LoadShedLayer::new())
} else {
None
})
.option_layer(concurrency_limit.map(ConcurrencyLimitLayer::new))
.layer_fn(|s| GrpcTimeout::new(s, timeout))
.service(svc);
Expand Down