-
Here is an example server:

package org.test;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.*;
public class ListenerExample {

    public static void main(String[] args) throws Exception {
        // Configure the server.
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.option(ChannelOption.SO_BACKLOG, 1024);
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            final ChannelPipeline pipeline = ch.pipeline();
                            pipeline.addLast(new HttpServerCodec());
                            pipeline.addLast(new HttpObjectAggregator(Integer.MAX_VALUE));
                            pipeline.addLast(new HttpContentDecompressor(true));
                            pipeline.addLast(new ListenerExampleHandler());
                        }
                    });
            Channel ch = b.bind(8081).sync().channel();
            ch.closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }

    private static class ListenerExampleHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest msg) throws Exception {
            System.out.println("New HTTP message: " + msg.uri());
            // Emulate long request handler
            Thread.sleep(2000);

            final var isKeepAlive = HttpUtil.isKeepAlive(msg);
            // Response
            final var content = "PONG".getBytes();
            final var response = new DefaultFullHttpResponse(
                    HttpVersion.HTTP_1_1, HttpResponseStatus.OK, Unpooled.wrappedBuffer(content));
            response.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/plain");
            response.headers().set(HttpHeaderNames.CONTENT_LENGTH, content.length);
            response.headers().set(HttpHeaderNames.CONNECTION,
                    isKeepAlive ? HttpHeaderValues.KEEP_ALIVE : HttpHeaderValues.CLOSE);

            ctx.writeAndFlush(response).addListener((ChannelFuture future) -> {
                if (!isKeepAlive || !future.isSuccess()) {
                    future.channel().close();
                }
                System.out.println("SUCCESS: " + (future.isDone() && future.isSuccess()));
                System.out.println("ERROR: " + (future.isDone() && future.cause() != null) + ". " + future.cause());
                System.out.println("CANCELED: " + (future.isDone() && future.isCancelled()));
            });
        }

        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
            cause.printStackTrace();
            ctx.close();
        }
    }
}

I need to get the message status after writeAndFlush. I tried adding a listener, but it doesn't work when the client resets the connection. For example: using curl I send a GET request to the server. When I see the message about the new request in the server console log, I terminate curl, yet the listener still reports success.
Why do I get a success status in this case? I asked the same question here as well.
Replies: 2 comments
-
Someone? :)
-
When you terminate curl like that, the client does not do a TCP termination handshake, so the server does not know that the peer has closed its connection. Thus, the connection state on the server is open, and we successfully deliver our data to the kernel to be sent. A little while later, the data packets start arriving at the client, but the client-side kernel has no open connection on that port number, so it responds with a TCP RST packet. When that packet is delivered on the server, it gets turned into a SocketException: Connection reset. Since we 1) have already completed our writes, and 2) have no idea which specific packet caused the TCP RST, we cannot possibly pick a future to fail.…
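In other words, a successful write future only confirms that the bytes reached the kernel's send buffer, not the client. If you want to observe the reset, it surfaces afterwards: the resulting IOException is delivered to exceptionCaught on a later read or write, and the channel's closeFuture completes when the channel goes down. Here is a minimal sketch against the handler above; the IOException check and the log messages are illustrative, not part of the original example:

    // Inside channelRead0: the write future can report success even though the
    // peer is already gone, so additionally watch the channel's closeFuture.
    ctx.writeAndFlush(response).addListener((ChannelFuture future) -> {
        if (!isKeepAlive || !future.isSuccess()) {
            future.channel().close();
        }
        future.channel().closeFuture().addListener((ChannelFuture closed) ->
                System.out.println("Channel closed (possibly by a TCP RST)"));
    });

    // The reset itself arrives here, after the write has already "succeeded".
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        if (cause instanceof java.io.IOException) {
            System.out.println("Peer reset the connection: " + cause.getMessage());
        } else {
            cause.printStackTrace();
        }
        ctx.close();
    }

Note that on a keep-alive connection this registers one closeFuture listener per request; that is fine for a demo, but real code would register it once, e.g. in channelActive.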