diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 3e4655e8e65..f2cad9ebc31 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -290,10 +290,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if _, err := io.ReadFull(t.conn, preface); err != nil { // In deployments where a gRPC server runs behind a cloud load balancer // which performs regular TCP level health checks, the connection is - // closed immediately by the latter. Skipping the error here will help - // reduce log clutter. + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. if err == io.EOF { - return nil, nil + return nil, io.EOF } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } diff --git a/server.go b/server.go index 1e82c955529..eadf9e05fd1 100644 --- a/server.go +++ b/server.go @@ -885,13 +885,11 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { // ErrConnDispatched means that the connection was dispatched away from // gRPC; those connections should be left open. if err != credentials.ErrConnDispatched { - c.Close() - } - // Don't log on ErrConnDispatched and io.EOF to prevent log spam. - if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
if err != io.EOF { channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) } + c.Close() } return nil } diff --git a/test/end2end_test.go b/test/end2end_test.go index 9d7fcb23206..957d13f731f 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -7990,3 +7990,61 @@ func (s) TestAuthorityHeader(t *testing.T) { }) } } + +// wrapCloseListener tracks Accepts/Closes and maintains a counter of the +// number of open connections. +type wrapCloseListener struct { + net.Listener + connsOpen int32 +} + +// wrapCloseConn is returned by wrapCloseListener.Accept and decrements its +// connsOpen when Close is called. +type wrapCloseConn struct { + net.Conn + lis *wrapCloseListener + closeOnce sync.Once +} + +func (w *wrapCloseListener) Accept() (net.Conn, error) { + conn, err := w.Listener.Accept() + if err != nil { + return nil, err + } + atomic.AddInt32(&w.connsOpen, 1) + return &wrapCloseConn{Conn: conn, lis: w}, nil +} + +func (w *wrapCloseConn) Close() error { + defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) }) + return w.Conn.Close() +} + +// TestServerClosesConn ensures conn.Close is always called even if the client +// doesn't complete the HTTP/2 handshake. +func (s) TestServerClosesConn(t *testing.T) { + lis := bufconn.Listen(20) + wrapLis := &wrapCloseListener{Listener: lis} + + s := grpc.NewServer() + go s.Serve(wrapLis) + defer s.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + for i := 0; i < 10; i++ { + conn, err := lis.DialContext(ctx) + if err != nil { + t.Fatalf("Dial = _, %v; want _, nil", err) + } + conn.Close() + } + for ctx.Err() == nil { + if atomic.LoadInt32(&wrapLis.connsOpen) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen)) +}