Make No. of Transport Threads == Available CPUs (#56488) (#56780)
We never do any file IO or other blocking work on the transport threads,
so there is no tangible benefit to using more IO threads than there are CPUs.
There are, however, significant downsides to using more threads than necessary,
with Netty in particular. Since we use the default setting for
`io.netty.allocator.useCacheForAllThreads`, which is `true`, we end up
using up to `16MB` of thread-local buffer cache for each transport thread.
This means we potentially waste CPUs * 16MB of heap on unnecessary IO threads,
in addition to the obvious inefficiency of adding extra context switches.
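As a rough back-of-the-envelope illustration of the CPUs * 16MB figure above (not part of the commit), the sketch below estimates the thread-local cache that the extra IO threads could pin. The 16MB-per-thread figure is quoted from the message; the class name and everything else are assumptions.

import java.util.Locale;

// Rough estimate of the heap that doubling the IO thread count could pin in
// Netty thread-local buffer caches, assuming ~16MB of cache per transport
// thread (the figure quoted above) and the default useCacheForAllThreads=true.
public class TransportThreadCacheEstimate {
    public static void main(String[] args) {
        int cpus = Runtime.getRuntime().availableProcessors();
        int oldWorkers = cpus * 2; // previous default: 2 * allocated processors
        int newWorkers = cpus;     // new default: allocated processors
        long cachePerThreadBytes = 16L * 1024 * 1024;
        long potentiallySaved = (oldWorkers - newWorkers) * cachePerThreadBytes;
        System.out.printf(Locale.ROOT, "CPUs=%d, cache no longer reserved: ~%dMB%n",
            cpus, potentiallySaved / (1024 * 1024));
    }
}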
original-brownbear committed May 14, 2020
1 parent b718193 commit 14a042f
Showing 2 changed files with 2 additions and 2 deletions.
@@ -78,7 +78,7 @@ public class Netty4Transport extends TcpTransport {

public static final Setting<Integer> WORKER_COUNT =
new Setting<>("transport.netty.worker_count",
- (s) -> Integer.toString(EsExecutors.allocatedProcessors(s) * 2),
+ (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)),
(s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), Property.NodeScope);

public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
@@ -57,7 +57,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin {

public static final Setting<Integer> NIO_WORKER_COUNT =
new Setting<>("transport.nio.worker_count",
- (s) -> Integer.toString(EsExecutors.allocatedProcessors(s) * 2),
+ (s) -> Integer.toString(EsExecutors.allocatedProcessors(s)),
(s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope);
public static final Setting<Integer> NIO_HTTP_WORKER_COUNT =
intSetting("http.nio.worker_count", 0, 0, Setting.Property.NodeScope);
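For context, here is a minimal sketch (not from the commit) of how the changed default resolves and how a node could still pin the worker count explicitly. The setting key and the WORKER_COUNT constant come from the diff above; the import paths, class name, and the example value of 16 are assumptions.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.netty4.Netty4Transport;

public class WorkerCountExample {
    public static void main(String[] args) {
        // With no explicit setting, WORKER_COUNT now defaults to the number of
        // allocated processors instead of twice that number.
        System.out.println(Netty4Transport.WORKER_COUNT.get(Settings.EMPTY));

        // Operators who want the previous sizing back can still set it
        // explicitly, e.g. 16 workers on an 8-core machine.
        Settings explicit = Settings.builder()
            .put("transport.netty.worker_count", 16)
            .build();
        System.out.println(Netty4Transport.WORKER_COUNT.get(explicit));
    }
}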
