feat(kad): remove deprecated Config::set_connection_idle_timeout
Related: libp2p#3844.
Related: libp2p#4656.

Pull-Request: libp2p#4659.
leonzchang committed Oct 20, 2023
1 parent fe1098f commit 1e3ffeb
Showing 5 changed files with 14 additions and 49 deletions.
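
For users of the removed setter, the migration is to configure the idle timeout once on the swarm rather than per protocol on `kad::Config`. Below is a minimal sketch mirroring the `swarm::Config` usage added to the tests in this diff; the five-second value and the async-std executor are simply what this diff happens to use, not requirements. The ipfs-kad example change below shows the equivalent `SwarmBuilder` route via `.with_swarm_config(|c| c.with_idle_connection_timeout(..))`.

```rust
use std::time::Duration;
use libp2p_swarm as swarm;

// Replacement for the removed kad::Config::set_connection_idle_timeout:
// the idle timeout is now a swarm-wide setting and applies to every
// connection, not only Kademlia's.
fn swarm_config() -> swarm::Config {
    swarm::Config::with_async_std_executor()
        .with_idle_connection_timeout(Duration::from_secs(5))
}
```

With the per-connection deadline gone, the Kademlia handler only reports whether it still has active substreams (see the `connection_keep_alive` change in `handler.rs` below); closing idle connections is left to this swarm-wide timeout.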
1 change: 1 addition & 0 deletions examples/ipfs-kad/src/main.rs
@@ -58,6 +58,7 @@ async fn main() -> Result<()> {
let store = kad::store::MemoryStore::new(key.public().to_peer_id());
kad::Behaviour::with_config(key.public().to_peer_id(), store, cfg)
})?
.with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5)))
.build();

// Add the bootnodes to the local routing table. `libp2p-dns` built
2 changes: 2 additions & 0 deletions protocols/kad/CHANGELOG.md
@@ -1,5 +1,7 @@
## 0.45.0 - unreleased

- Remove deprecated `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`.
See [PR 4659](https://github.com/libp2p/rust-libp2p/pull/4659).
- Emit `ModeChanged` event whenever we automatically reconfigure the mode.
See [PR 4503](https://github.com/libp2p/rust-libp2p/pull/4503).

19 changes: 2 additions & 17 deletions protocols/kad/src/behaviour.rs
@@ -96,9 +96,6 @@ pub struct Behaviour<TStore> {
/// The TTL of provider records.
provider_record_ttl: Option<Duration>,

/// How long to keep connections alive when they're idle.
connection_idle_timeout: Duration,

/// Queued events to return when the behaviour is being polled.
queued_events: VecDeque<ToSwarm<Event, HandlerIn>>,

@@ -182,7 +179,6 @@ pub struct Config {
record_filtering: StoreInserts,
provider_record_ttl: Option<Duration>,
provider_publication_interval: Option<Duration>,
connection_idle_timeout: Duration,
kbucket_inserts: BucketInserts,
caching: Caching,
}
@@ -199,7 +195,6 @@ impl Default for Config {
record_filtering: StoreInserts::Unfiltered,
provider_publication_interval: Some(Duration::from_secs(12 * 60 * 60)),
provider_record_ttl: Some(Duration::from_secs(24 * 60 * 60)),
connection_idle_timeout: Duration::from_secs(10),
kbucket_inserts: BucketInserts::OnConnected,
caching: Caching::Enabled { max_peers: 1 },
}
@@ -371,15 +366,6 @@ impl Config {
self
}

/// Sets the amount of time to keep connections alive when they're idle.
#[deprecated(
note = "Set a global idle connection timeout via `SwarmBuilder::idle_connection_timeout` instead."
)]
pub fn set_connection_idle_timeout(&mut self, duration: Duration) -> &mut Self {
self.connection_idle_timeout = duration;
self
}

/// Modifies the maximum allowed size of individual Kademlia packets.
///
/// It might be necessary to increase this value if trying to put large
@@ -456,7 +442,6 @@
put_record_job,
record_ttl: config.record_ttl,
provider_record_ttl: config.provider_record_ttl,
connection_idle_timeout: config.connection_idle_timeout,
external_addresses: Default::default(),
local_peer_id: id,
connections: Default::default(),
@@ -2082,9 +2067,9 @@ where
local_addr: local_addr.clone(),
send_back_addr: remote_addr.clone(),
};

let mut handler = Handler::new(
self.protocol_config.clone(),
self.connection_idle_timeout,
connected_point,
peer,
self.mode,
@@ -2106,9 +2091,9 @@ where
address: addr.clone(),
role_override,
};

let mut handler = Handler::new(
self.protocol_config.clone(),
self.connection_idle_timeout,
connected_point,
peer,
self.mode,
4 changes: 3 additions & 1 deletion protocols/kad/src/behaviour/test.rs
@@ -37,6 +37,7 @@ use libp2p_core::{
use libp2p_identity as identity;
use libp2p_identity::PeerId;
use libp2p_noise as noise;
use libp2p_swarm::behaviour::ConnectionEstablished;
use libp2p_swarm::{self as swarm, ConnectionId, Swarm, SwarmEvent};
use libp2p_yamux as yamux;
use quickcheck::*;
@@ -71,7 +72,8 @@ fn build_node_with_config(cfg: Config) -> (Multiaddr, TestSwarm) {
transport,
behaviour,
local_id,
swarm::Config::with_async_std_executor(),
swarm::Config::with_async_std_executor()
.with_idle_connection_timeout(Duration::from_secs(5)),
);

let address: Multiaddr = Protocol::Memory(random::<u64>()).into();
37 changes: 6 additions & 31 deletions protocols/kad/src/handler.rs
@@ -27,7 +27,6 @@ use crate::QueryId;
use either::Either;
use futures::prelude::*;
use futures::stream::SelectAll;
use instant::Instant;
use libp2p_core::{upgrade, ConnectedPoint};
use libp2p_identity::PeerId;
use libp2p_swarm::handler::{
@@ -40,9 +39,7 @@ use libp2p_swarm::{
use log::trace;
use std::collections::VecDeque;
use std::task::Waker;
use std::{
error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll, time::Duration,
};
use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll};

const MAX_NUM_SUBSTREAMS: usize = 32;

@@ -60,9 +57,6 @@ pub struct Handler {
/// In client mode, we don't accept inbound substreams.
mode: Mode,

/// Time after which we close an idle connection.
idle_timeout: Duration,

/// Next unique ID of a connection.
next_connec_unique_id: UniqueConnecId,

@@ -79,9 +73,6 @@ pub struct Handler {
/// List of active inbound substreams with the state they are in.
inbound_substreams: SelectAll<InboundSubstreamState>,

/// Until when to keep the connection alive.
keep_alive: KeepAlive,

/// The connected endpoint of the connection that the handler
/// is associated with.
endpoint: ConnectedPoint,
@@ -465,7 +456,6 @@ struct UniqueConnecId(u64);
impl Handler {
pub fn new(
protocol_config: ProtocolConfig,
idle_timeout: Duration,
endpoint: ConnectedPoint,
remote_peer_id: PeerId,
mode: Mode,
@@ -484,21 +474,16 @@ impl Handler {
}
}

#[allow(deprecated)]
let keep_alive = KeepAlive::Until(Instant::now() + idle_timeout);

Handler {
protocol_config,
mode,
idle_timeout,
endpoint,
remote_peer_id,
next_connec_unique_id: UniqueConnecId(0),
inbound_substreams: Default::default(),
outbound_substreams: Default::default(),
num_requested_outbound_streams: 0,
pending_messages: Default::default(),
keep_alive,
protocol_status: None,
remote_supported_protocols: Default::default(),
connection_id,
@@ -718,7 +703,11 @@ impl ConnectionHandler for Handler {
}

fn connection_keep_alive(&self) -> KeepAlive {
self.keep_alive
if self.outbound_substreams.is_empty() && self.inbound_substreams.is_empty() {
return KeepAlive::No;
};

KeepAlive::Yes
}

fn poll(
@@ -769,20 +758,6 @@
});
}

let no_streams = self.outbound_substreams.is_empty() && self.inbound_substreams.is_empty();

self.keep_alive = {
#[allow(deprecated)]
match (no_streams, self.keep_alive) {
// No open streams. Preserve the existing idle timeout.
(true, k @ KeepAlive::Until(_)) => k,
// No open streams. Set idle timeout.
(true, _) => KeepAlive::Until(Instant::now() + self.idle_timeout),
// Keep alive for open streams.
(false, _) => KeepAlive::Yes,
}
};

Poll::Pending
}
