diff --git a/go.mod b/go.mod index 95c875665c..4d4fb58535 100644 --- a/go.mod +++ b/go.mod @@ -60,7 +60,7 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 google.golang.org/api v0.61.0 - google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.41.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b sigs.k8s.io/yaml v1.2.0 @@ -211,9 +211,6 @@ replace github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immut replace github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 -// TODO review the change introduced by https://github.com/grpc/grpc-go/pull/4416 before upgrading to 1.39.0 -replace google.golang.org/grpc => google.golang.org/grpc v1.38.0 - replace github.com/thanos-io/thanos v0.22.0 => github.com/thanos-io/thanos v0.19.1-0.20211229180107-bf14049d5745 // Replace memberlist with our fork which includes some fixes that haven't been diff --git a/go.sum b/go.sum index f6bb74e2ab..315cd0790b 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -264,8 +265,13 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe h1:QJDJubh0OEcpeGjC7/8uF9tt4e39U/Ya1uyK+itnNPQ= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -471,7 +477,15 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= 
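The go.mod hunk above is the heart of the PR: the `replace` pin that held gRPC at v1.38.0 (guarding against the `ServerName` handling change from grpc/grpc-go#4416) is dropped, and the direct requirement moves to v1.41.0. As a rough sketch of what that upstream change means (all names and addresses here are placeholders, not from this PR): a non-empty per-address `ServerName` set by the resolver now takes precedence over the `ClientConn` authority, and the vendored hunks further down (`tryUpdateAddrs`, `createTransport`) fill the field in from the authority only when it is empty.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Hypothetical address as a custom resolver might produce it. Since
	// grpc-go 1.39 (PR #4416), a non-empty per-address ServerName wins
	// over the ClientConn authority (e.g. for TLS server-name checks)
	// on connections to this address.
	addr := resolver.Address{
		Addr:       "10.0.0.1:9095",           // placeholder backend
		ServerName: "ingester-1.cortex.local", // placeholder name
	}
	fmt.Println(addr.ServerName)
}
```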
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -715,6 +729,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -1034,6 +1049,7 @@ github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0 github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1641,6 +1657,7 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1667,6 +1684,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1771,6 +1789,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1920,8 +1939,10 @@ golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13W golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -2042,6 +2063,7 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= google.golang.org/api v0.61.0 h1:TXXKS1slM3b2bZNJwD5DV/Tp6/M2cLzLOLh9PjDhrw8= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2122,8 +2144,41 @@ google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk= google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20211119005141-f45e61797429/go.mod h1:gID3PKrg7pWKntu9Ss6zTLJ0ttC0X9IHgREOCZwbCVU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2187,6 +2242,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go deleted file mode 100644 index e4ffca838a..0000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cmpopts provides common options for the cmp package. -package cmpopts - -import ( - "math" - "reflect" - "time" - - "github.com/google/go-cmp/cmp" -) - -func equateAlways(_, _ interface{}) bool { return true } - -// EquateEmpty returns a Comparer option that determines all maps and slices -// with a length of zero to be equal, regardless of whether they are nil. -// -// EquateEmpty can be used in conjunction with SortSlices and SortMaps. -func EquateEmpty() cmp.Option { - return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways)) -} - -func isEmpty(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) && - (vx.Len() == 0 && vy.Len() == 0) -} - -// EquateApprox returns a Comparer option that determines float32 or float64 -// values to be equal if they are within a relative fraction or absolute margin. -// This option is not used when either x or y is NaN or infinite. -// -// The fraction determines that the difference of two values must be within the -// smaller fraction of the two values, while the margin determines that the two -// values must be within some absolute margin. -// To express only a fraction or only a margin, use 0 for the other parameter. -// The fraction and margin must be non-negative. -// -// The mathematical expression used is equivalent to: -// |x-y| ≤ max(fraction*min(|x|, |y|), margin) -// -// EquateApprox can be used in conjunction with EquateNaNs. 
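`go mod vendor` prunes packages that nothing imports any longer, which is presumably why this `cmpopts` source leaves the vendor tree here; the package itself is unchanged upstream. For reference, a small usage sketch of the options documented above, following the `|x-y| ≤ max(fraction*min(|x|, |y|), margin)` rule:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// |2.0005-2.0| = 0.0005 <= max(0.001*min(2.0005, 2.0), 0) = 0.002,
	// so the two floats compare as equal.
	fmt.Println(cmp.Equal(2.0005, 2.0, cmpopts.EquateApprox(0.001, 0)))

	// NaN != NaN by default; EquateNaNs composes with EquateApprox.
	nan := math.NaN()
	fmt.Println(cmp.Equal(nan, nan, cmpopts.EquateNaNs()))
}
```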
-func EquateApprox(fraction, margin float64) cmp.Option { - if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) { - panic("margin or fraction must be a non-negative number") - } - a := approximator{fraction, margin} - return cmp.Options{ - cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)), - cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)), - } -} - -type approximator struct{ frac, marg float64 } - -func areRealF64s(x, y float64) bool { - return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0) -} -func areRealF32s(x, y float32) bool { - return areRealF64s(float64(x), float64(y)) -} -func (a approximator) compareF64(x, y float64) bool { - relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y)) - return math.Abs(x-y) <= math.Max(a.marg, relMarg) -} -func (a approximator) compareF32(x, y float32) bool { - return a.compareF64(float64(x), float64(y)) -} - -// EquateNaNs returns a Comparer option that determines float32 and float64 -// NaN values to be equal. -// -// EquateNaNs can be used in conjunction with EquateApprox. -func EquateNaNs() cmp.Option { - return cmp.Options{ - cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)), - cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)), - } -} - -func areNaNsF64s(x, y float64) bool { - return math.IsNaN(x) && math.IsNaN(y) -} -func areNaNsF32s(x, y float32) bool { - return areNaNsF64s(float64(x), float64(y)) -} - -// EquateApproxTime returns a Comparer option that determines two non-zero -// time.Time values to be equal if they are within some margin of one another. -// If both times have a monotonic clock reading, then the monotonic time -// difference will be used. The margin must be non-negative. -func EquateApproxTime(margin time.Duration) cmp.Option { - if margin < 0 { - panic("margin must be a non-negative number") - } - a := timeApproximator{margin} - return cmp.FilterValues(areNonZeroTimes, cmp.Comparer(a.compare)) -} - -func areNonZeroTimes(x, y time.Time) bool { - return !x.IsZero() && !y.IsZero() -} - -type timeApproximator struct { - margin time.Duration -} - -func (a timeApproximator) compare(x, y time.Time) bool { - // Avoid subtracting times to avoid overflow when the - // difference is larger than the largest representible duration. - if x.After(y) { - // Ensure x is always before y - x, y = y, x - } - // We're within the margin if x+margin >= y. - // Note: time.Time doesn't have AfterOrEqual method hence the negation. - return !x.Add(a.margin).Before(y) -} - -// AnyError is an error that matches any non-nil error. -var AnyError anyError - -type anyError struct{} - -func (anyError) Error() string { return "any error" } -func (anyError) Is(err error) bool { return err != nil } - -// EquateErrors returns a Comparer option that determines errors to be equal -// if errors.Is reports them to match. The AnyError error can be used to -// match any non-nil error. -func EquateErrors() cmp.Option { - return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors)) -} - -// areConcreteErrors reports whether x and y are types that implement error. -// The input types are deliberately of the interface{} type rather than the -// error type so that we can handle situations where the current type is an -// interface{}, but the underlying concrete types both happen to implement -// the error interface. 
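Similarly, a sketch of the `EquateErrors` option and the `AnyError` sentinel defined above:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	base := errors.New("not found")
	wrapped := fmt.Errorf("open failed: %w", base)

	// Equal because errors.Is(wrapped, base) holds.
	fmt.Println(cmp.Equal(wrapped, base, cmpopts.EquateErrors()))

	// AnyError matches any non-nil error.
	fmt.Println(cmp.Equal(wrapped, cmpopts.AnyError, cmpopts.EquateErrors()))
}
```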
-func areConcreteErrors(x, y interface{}) bool { - _, ok1 := x.(error) - _, ok2 := y.(error) - return ok1 && ok2 -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go deleted file mode 100644 index 26fe25d6af..0000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_go113.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.13 - -package cmpopts - -import "errors" - -func compareErrors(x, y interface{}) bool { - xe := x.(error) - ye := y.(error) - return errors.Is(xe, ye) || errors.Is(ye, xe) -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go deleted file mode 100644 index 6eeb8d6e65..0000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/errors_xerrors.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2021, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.13 - -// TODO(≥go1.13): For support on <go1.13 […] - -package cmpopts - -import "golang.org/x/xerrors" - -func compareErrors(x, y interface{}) bool { - xe := x.(error) - ye := y.(error) - return xerrors.Is(xe, ye) || xerrors.Is(ye, xe) -} [… the ignore.go deletion and the start of the sort.go deletion are missing here …] - if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) { - panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i))) - } - start = -1 - } else if start == -1 { - start = i - } - } -} -func (ss sliceSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i), v.Index(j) - return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} - -// SortMaps returns a Transformer option that flattens map[K]V types to be a -// sorted []struct{K, V}. The less function must be of the form -// "func(T, T) bool" which is used to sort any map with key K that is -// assignable to T. -// -// Flattening the map into a slice has the property that cmp.Equal is able to -// use Comparers on K or the K.Equal method if it exists. -// -// The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// • Total: if x != y, then either less(x, y) or less(y, x) -// -// SortMaps can be used in conjunction with EquateEmpty.
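And a sketch of `SortMaps`: the map is flattened into a `[]struct{K; V}` sorted by the given ordering, which makes `cmp.Diff` output deterministic and lets `Comparer` options see the keys:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := map[int]string{1: "a", 2: "b"}
	y := map[int]string{2: "b", 1: "a"}
	// The less func must be a total order on int, per the contract above.
	opt := cmpopts.SortMaps(func(a, b int) bool { return a < b })
	fmt.Println(cmp.Equal(x, y, opt)) // true
}
```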
-func SortMaps(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) - } - ms := mapSorter{vf.Type().In(0), vf} - return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) -} - -type mapSorter struct { - in reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (ms mapSorter) filter(x, y interface{}) bool { - vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) - return (x != nil && y != nil && vx.Type() == vy.Type()) && - (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && - (vx.Len() != 0 || vy.Len() != 0) -} -func (ms mapSorter) sort(x interface{}) interface{} { - src := reflect.ValueOf(x) - outType := reflect.StructOf([]reflect.StructField{ - {Name: "K", Type: src.Type().Key()}, - {Name: "V", Type: src.Type().Elem()}, - }) - dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) - for i, k := range src.MapKeys() { - v := reflect.New(outType).Elem() - v.Field(0).Set(k) - v.Field(1).Set(src.MapIndex(k)) - dst.Index(i).Set(v) - } - sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) - ms.checkSort(dst) - return dst.Interface() -} -func (ms mapSorter) checkSort(v reflect.Value) { - for i := 1; i < v.Len(); i++ { - if !ms.less(v, i-1, i) { - panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) - } - } -} -func (ms mapSorter) less(v reflect.Value, i, j int) bool { - vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go deleted file mode 100644 index a09829c3af..0000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmpopts - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp" -) - -// filterField returns a new Option where opt is only evaluated on paths that -// include a specific exported field on a single struct type. -// The struct type is specified by passing in a value of that type. -// -// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a -// specific sub-field that is embedded or nested within the parent struct. -func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { - // TODO: This is currently unexported over concerns of how helper filters - // can be composed together easily. - // TODO: Add tests for FilterField. - - sf := newStructFilter(typ, name) - return cmp.FilterPath(sf.filter, opt) -} - -type structFilter struct { - t reflect.Type // The root struct type to match on - ft fieldTree // Tree of fields to match on -} - -func newStructFilter(typ interface{}, names ...string) structFilter { - // TODO: Perhaps allow * as a special identifier to allow ignoring any - // number of path steps until the next field match? - // This could be useful when a concrete struct gets transformed into - // an anonymous struct where it is not possible to specify that by type, - // but the transformer happens to provide guarantees about the names of - // the transformed fields. 
- - t := reflect.TypeOf(typ) - if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) - } - var ft fieldTree - for _, name := range names { - cname, err := canonicalName(t, name) - if err != nil { - panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) - } - ft.insert(cname) - } - return structFilter{t, ft} -} - -func (sf structFilter) filter(p cmp.Path) bool { - for i, ps := range p { - if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { - return true - } - } - return false -} - -// fieldTree represents a set of dot-separated identifiers. -// -// For example, inserting the following selectors: -// Foo -// Foo.Bar.Baz -// Foo.Buzz -// Nuka.Cola.Quantum -// -// Results in a tree of the form: -// {sub: { -// "Foo": {ok: true, sub: { -// "Bar": {sub: { -// "Baz": {ok: true}, -// }}, -// "Buzz": {ok: true}, -// }}, -// "Nuka": {sub: { -// "Cola": {sub: { -// "Quantum": {ok: true}, -// }}, -// }}, -// }} -type fieldTree struct { - ok bool // Whether this is a specified node - sub map[string]fieldTree // The sub-tree of fields under this node -} - -// insert inserts a sequence of field accesses into the tree. -func (ft *fieldTree) insert(cname []string) { - if ft.sub == nil { - ft.sub = make(map[string]fieldTree) - } - if len(cname) == 0 { - ft.ok = true - return - } - sub := ft.sub[cname[0]] - sub.insert(cname[1:]) - ft.sub[cname[0]] = sub -} - -// matchPrefix reports whether any selector in the fieldTree matches -// the start of path p. -func (ft fieldTree) matchPrefix(p cmp.Path) bool { - for _, ps := range p { - switch ps := ps.(type) { - case cmp.StructField: - ft = ft.sub[ps.Name()] - if ft.ok { - return true - } - if len(ft.sub) == 0 { - return false - } - case cmp.Indirect: - default: - return false - } - } - return false -} - -// canonicalName returns a list of identifiers where any struct field access -// through an embedded field is expanded to include the names of the embedded -// types themselves. -// -// For example, suppose field "Foo" is not directly in the parent struct, -// but actually from an embedded struct of type "Bar". Then, the canonical name -// of "Foo" is actually "Bar.Foo". -// -// Suppose field "Foo" is not directly in the parent struct, but actually -// a field in two different embedded structs of types "Bar" and "Baz". -// Then the selector "Foo" causes a panic since it is ambiguous which one it -// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". -func canonicalName(t reflect.Type, sel string) ([]string, error) { - var name string - sel = strings.TrimPrefix(sel, ".") - if sel == "" { - return nil, fmt.Errorf("name must not be empty") - } - if i := strings.IndexByte(sel, '.'); i < 0 { - name, sel = sel, "" - } else { - name, sel = sel[:i], sel[i:] - } - - // Type must be a struct or pointer to struct. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("%v must be a struct", t) - } - - // Find the canonical name for this current field name. - // If the field exists in an embedded struct, then it will be expanded. - sf, _ := t.FieldByName(name) - if !isExported(name) { - // Avoid using reflect.Type.FieldByName for unexported fields due to - // buggy behavior with regard to embeddeding and unexported fields. - // See https://golang.org/issue/4876 for details. 
- sf = reflect.StructField{} - for i := 0; i < t.NumField() && sf.Name == ""; i++ { - if t.Field(i).Name == name { - sf = t.Field(i) - } - } - } - if sf.Name == "" { - return []string{name}, fmt.Errorf("does not exist") - } - var ss []string - for i := range sf.Index { - ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) - } - if sel == "" { - return ss, nil - } - ssPost, err := canonicalName(sf.Type, sel) - return append(ss, ssPost...), err -} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go deleted file mode 100644 index 4eb49d63db..0000000000 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmpopts - -import ( - "github.com/google/go-cmp/cmp" -) - -type xformFilter struct{ xform cmp.Option } - -func (xf xformFilter) filter(p cmp.Path) bool { - for _, ps := range p { - if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform { - return false - } - } - return true -} - -// AcyclicTransformer returns a Transformer with a filter applied that ensures -// that the transformer cannot be recursively applied upon its own output. -// -// An example use case is a transformer that splits a string by lines: -// AcyclicTransformer("SplitLines", func(s string) []string{ -// return strings.Split(s, "\n") -// }) -// -// Had this been an unfiltered Transformer instead, this would result in an -// infinite cycle converting a string to []string to [][]string and so on. -func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option { - xf := xformFilter{cmp.Transformer(name, xformFunc)} - return cmp.FilterPath(xf.filter, xf.xform) -} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 5847d94e55..0000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md 
b/vendor/google.golang.org/grpc/MAINTAINERS.md index 093c82b3af..c6672c0a3e 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. ## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f0722f162..1f8960922b 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -41,8 +41,6 @@ vetdeps: clean \ proto \ test \ - testappengine \ - testappenginedeps \ testrace \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 0000000000..530197749e --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 3949a683fb..0e6ae69a58 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -136,6 +136,6 @@ errors. [Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index ab531f4c0b..178de0898a 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -75,24 +75,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. 
gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -326,6 +328,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -353,8 +369,10 @@ var ErrBadResolverState = errors.New("bad resolver state") // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. 
} // RecordTransition records state change happening in subConn and based on that @@ -362,9 +380,11 @@ type ConnectivityStateEvaluator struct { // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. +// - Else if at least one SubConn is TransientFailure, the aggregated state is Transient Failure; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else there are no subconns and the aggregated state is Transient Failure // -// Idle and Shutdown are not considered. +// Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. for idx, state := range []connectivity.State{oldState, newState} { @@ -374,6 +394,10 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal } } @@ -384,5 +408,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne if cse.numConnecting > 0 { return connectivity.Connecting } + if cse.numTransientFailure > 0 { + return connectivity.TransientFailure + } + if cse.numIdle > 0 { + return connectivity.Idle + } return connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index c883efa0bb..8dd504299f 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,6 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() } else { // Always update the subconn's address in case the attributes @@ -213,10 +214,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +247,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -251,6 +255,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). 
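The `ExitIdler` plumbing introduced above is what `ClientConn.Connect` (added later in this diff) ultimately invokes. A minimal sketch, outside gRPC, of an LB policy implementing it (`exitIdleBalancer` and its field are hypothetical, not part of the patch):

```go
package custombalancer

import "google.golang.org/grpc/balancer"

// exitIdleBalancer stands in for a custom LB policy that tracks the
// SubConns it has created.
type exitIdleBalancer struct {
	subConns []balancer.SubConn
}

// ExitIdle implements the new balancer.ExitIdler interface: ask every
// SubConn to leave IDLE. Per the SubConn docs above, a SubConn that
// entered IDLE stays there until Connect is called on it.
func (b *exitIdleBalancer) ExitIdle() {
	for _, sc := range b.subConns {
		sc.Connect()
	}
}
```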
func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index a43d896411..adf5961116 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -25,6 +25,7 @@ package grpclb import ( "context" "errors" + "fmt" "sync" "time" @@ -221,6 +222,7 @@ type lbBalancer struct { // when resolved address updates are received, and read in the goroutine // handling fallback. resolvedBackendAddrs []resolver.Address + connErr error // the last connection error } // regeneratePicker takes a snapshot of the balancer, and generates a picker from @@ -230,7 +232,7 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} + lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} return } @@ -336,6 +338,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError } // Force regenerate picker if // - this sc became ready from not-ready @@ -484,3 +488,5 @@ func (lb *lbBalancer) Close() { } lb.cc.close() } + +func (lb *lbBalancer) ExitIdle() {} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 43c2a15373..274eb2f858 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 4cc7f9159b..f4ea617468 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -37,14 +37,20 @@ type scStateUpdate struct { err error } +// exitIdle contains no data and is just a signal sent on the updateCh in +// ccBalancerWrapper to instruct the balancer to exit idle. +type exitIdle struct{} + // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
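For context on the `roundrobin` hunk above: the policy is selected by its registered `Name` through service config. A hedged usage sketch (the target is a placeholder; `WithInsecure` is still the current spelling in grpc-go 1.41):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Select the round_robin policy whose picker builder is patched above.
	cc, err := grpc.Dial("dns:///backend.example:9095",
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}
```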
type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - scBuffer *buffer.Unbounded - done *grpcsync.Event + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + hasExitIdle bool + updateCh *buffer.Unbounded + closed *grpcsync.Event + done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} @@ -53,12 +59,14 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, - scBuffer: buffer.NewUnbounded(), + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) + _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) return ccb } @@ -67,35 +75,72 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { + case t := <-ccb.updateCh.Get(): + ccb.updateCh.Load() + if ccb.closed.HasFired() { break } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + switch u := t.(type) { + case *scStateUpdate: + ccb.balancerMu.Lock() + ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) + ccb.balancerMu.Unlock() + case *acBalancerWrapper: + ccb.mu.Lock() + if ccb.subConns != nil { + delete(ccb.subConns, u) + ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) + } + ccb.mu.Unlock() + case exitIdle: + if ccb.cc.GetState() == connectivity.Idle { + if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { + // We already checked that the balancer implements + // ExitIdle before pushing the event to updateCh, but + // check conditionally again as defensive programming. + ccb.balancerMu.Lock() + ei.ExitIdle() + ccb.balancerMu.Unlock() + } + } + default: + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + } + case <-ccb.closed.Done(): } - if ccb.done.HasFired() { + if ccb.closed.HasFired() { + ccb.balancerMu.Lock() ccb.balancer.Close() + ccb.balancerMu.Unlock() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil ccb.mu.Unlock() + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + ccb.done.Fire() + // Fire done before removing the addr conns. We can safely unblock + // ccb.close and allow the removeAddrConns to happen + // asynchronously. 
for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return } } } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) exitIdle() bool { + if !ccb.hasExitIdle { + return false + } + ccb.updateCh.Put(exitIdle{}) + return true } func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { @@ -109,7 +154,7 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.scBuffer.Put(&scStateUpdate{ + ccb.updateCh.Put(&scStateUpdate{ sc: sc, state: s, err: err, @@ -124,8 +169,8 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) resolverError(err error) { ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -150,17 +195,10 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock + // during switchBalancer() if the old balancer calls RemoveSubConn() in its + // Close(). + ccb.updateCh.Put(sc) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -226,17 +264,17 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { return } - ac, err := cc.newAddrConn(addrs, opts) + newAC, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() + acbw.ac = newAC + newAC.mu.Lock() + newAC.acbw = acbw + newAC.mu.Unlock() if acState != connectivity.Idle { - ac.connect() + go newAC.connect() } } } @@ -244,7 +282,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() - acbw.ac.connect() + go acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 24109264f5..34cc4c948d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -322,6 +322,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { + cc.Connect() s := cc.GetState() if s == connectivity.Ready { break @@ -539,12 +540,31 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // // Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. 
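The `DialContext` hunk above adds a `cc.Connect()` call on each iteration of the blocking-dial loop, so a `WithBlock` dial now actively drives an idle channel instead of waiting for something else to trigger a connection. A usage sketch (the address is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Blocks until READY or ctx expires; with this change each loop
	// iteration also kicks the channel out of IDLE via cc.Connect().
	cc, err := grpc.DialContext(ctx, "localhost:9095",
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer cc.Close()
}
```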
func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { + return + } + for ac := range cc.conns { + go ac.connect() + } +} + func (cc *ClientConn) scWatcher() { for { select { @@ -711,7 +731,12 @@ func (cc *ClientConn) switchBalancer(name string) { return } if cc.balancerWrapper != nil { + // Don't hold cc.mu while closing the balancers. The balancers may call + // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex + // would cause a deadlock in that case. + cc.mu.Unlock() cc.balancerWrapper.close() + cc.mu.Lock() } builder := balancer.Get(name) @@ -840,8 +865,7 @@ func (ac *addrConn) connect() error { ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. - go ac.resetTransport() + ac.resetTransport() return nil } @@ -878,6 +902,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { // ac.state is Ready, try to find the connected address. var curAddrFound bool for _, a := range addrs { + // a.ServerName takes precedent over ClientConn authority, if present. + if a.ServerName == "" { + a.ServerName = ac.cc.authority + } if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break @@ -1046,12 +1074,12 @@ func (cc *ClientConn) Close() error { cc.blockingpicker.close() - if rWrapper != nil { - rWrapper.close() - } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1130,112 +1158,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. 
ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } + ac.updateConnectivityState(connectivity.TransientFailure, err) - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } - - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor - } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) - - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. - b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-ac.ctx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close(fmt.Errorf("reached connectivity state: SHUTDOWN")) - return + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. 
It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + return errConnClosing } ac.cc.mu.RLock() @@ -1250,9 +1252,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1261,57 +1263,54 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + // TODO: Delete prefaceReceived and move the logic to wait for it into the + // transport. + prefaceReceived := grpcsync.NewEvent() + connClosed := grpcsync.NewEvent() // addr.ServerName takes precedent over ClientConn authority, if present. if addr.ServerName == "" { addr.ServerName = ac.cc.authority } - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } + hctx, hcancel := context.WithCancel(ac.ctx) + hcStarted := false // protected by ac.mu onClose := func() { ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() + defer ac.mu.Unlock() + defer connClosed.Fire() + if !hcStarted || hctx.Err() != nil { + // We didn't start the health check or set the state to READY, so + // no need to do anything else here. + // + // OR, we have already cancelled the health check context, meaning + // we have already called onClose once for this transport. In this + // case it would be dangerous to clear the transport and update the + // state, since there may be a new transport in this addrConn. 
+ return + } + hcancel() + ac.transport = nil + // Refresh the name resolver + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } } - onPrefaceReceipt := func() { - close(prefaceReceived) + onGoAway := func(r transport.GoAwayReason) { + ac.mu.Lock() + ac.adjustParams(r) + ac.mu.Unlock() + onClose() } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) @@ -1320,27 +1319,67 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + return err } select { - case <-time.After(time.Until(connectDeadline)): + case <-connectCtx.Done(): // We didn't get the preface in time. - newTr.Close(fmt.Errorf("failed to receive server preface within timeout")) - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + newTr.Close(transport.ErrConnClosing) + if connectCtx.Err() == context.DeadlineExceeded { + err := errors.New("failed to receive server preface within timeout") + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + return err + } + return nil + case <-prefaceReceived.Done(): // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if connClosed.HasFired() { + // onClose called first; go idle but do nothing else. + if ac.state != connectivity.Shutdown { + ac.updateConnectivityState(connectivity.Idle, nil) + } + return nil + } + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + go newTr.Close(transport.ErrConnClosing) + return nil + } + ac.curAddr = addr + ac.transport = newTr + hcStarted = true + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. 
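The rewritten createTransport coordinates its callbacks through two fire-once events, prefaceReceived and connClosed. grpcsync.Event is internal to gRPC, but its semantics are small; below is a stand-in sketch using only the standard library, assuming just the three methods used above (Fire, Done, HasFired), not the internal implementation.

package main

import (
	"fmt"
	"sync"
)

// Event is a stand-in for internal/grpcsync.Event: Fire is idempotent,
// Done returns a channel that is closed on the first Fire, and HasFired
// reports whether Fire has happened yet.
type Event struct {
	once sync.Once
	done chan struct{}
}

func NewEvent() *Event { return &Event{done: make(chan struct{})} }

// Fire closes the done channel exactly once; later calls are no-ops.
func (e *Event) Fire() { e.once.Do(func() { close(e.done) }) }

// Done exposes the channel to select on, as createTransport does with
// prefaceReceived.Done() and connClosed.Done().
func (e *Event) Done() <-chan struct{} { return e.done }

// HasFired is the non-blocking check used in the onClose/preface race.
func (e *Event) HasFired() bool {
	select {
	case <-e.done:
		return true
	default:
		return false
	}
}

func main() {
	ev := NewEvent()
	go ev.Fire()
	<-ev.Done()
	fmt.Println(ev.HasFired()) // true; extra Fire calls stay no-ops
}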
+ return nil + case <-connClosed.Done(): + // The transport has already closed. If we received the preface, too, + // this is not an error. + select { + case <-prefaceReceived.Done(): + return nil + default: + return errors.New("connection closed before server preface received") + } } - return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1424,26 +1463,14 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true - } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - return nil, false + return nil } // tearDown starts to tear down the addrConn. diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go index 0101562615..4a89926422 100644 --- a/vendor/google.golang.org/grpc/connectivity/connectivity.go +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go deleted file mode 100644 index ccbf35b331..0000000000 --- a/vendor/google.golang.org/grpc/credentials/go12.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build go1.12 - -/* - * - * Copyright 2019 gRPC authors. 
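The connectivity changes earlier in this hunk add ServingMode alongside the existing State and tighten both String methods. A short usage sketch; the printed values come straight from the String implementations shown above.

package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

func main() {
	// Channel states stringify as before; an out-of-range value now
	// prints as "INVALID_STATE" instead of "Invalid-State".
	fmt.Println(connectivity.Ready) // READY

	// ServingMode is new in this release and is currently reported
	// only by xDS-enabled gRPC servers.
	fmt.Println(connectivity.ServingModeServing)    // SERVING
	fmt.Println(connectivity.ServingModeNotServing) // NOT_SERVING
}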
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package credentials - -import "crypto/tls" - -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" -} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 852ae375cf..c748fd21ce 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -23,6 +23,7 @@ import ( "context" "fmt" "io/ioutil" + "net/url" "sync" "golang.org/x/oauth2" @@ -56,6 +57,16 @@ func (ts TokenSource) RequireTransportSecurity() bool { return true } +// removeServiceNameFromJWTURI removes RPC service name from URI. +func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + type jwtAccess struct { jsonKey []byte } @@ -75,9 +86,15 @@ func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with // uri as the key, to avoid recreating for every RPC. 
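The oauth change above trims the RPC service name from the audience used for self-signed JWTs, per https://google.aip.dev/auth/4111. A standalone illustration of the same transformation; the Pub/Sub URI is only an example input, and trimServiceName is a local mirror of the unexported removeServiceNameFromJWTURI helper.

package main

import (
	"fmt"
	"log"
	"net/url"
)

// trimServiceName mirrors removeServiceNameFromJWTURI: keep the scheme
// and host, drop the /package.Service path component.
func trimServiceName(uri string) (string, error) {
	parsed, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	parsed.Path = "/"
	return parsed.String(), nil
}

func main() {
	aud, err := trimServiceName("https://pubsub.googleapis.com/google.pubsub.v1.Publisher")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aud) // https://pubsub.googleapis.com/
}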
- ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 8ee7124f22..784822d056 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh deleted file mode 100644 index 15ff9facdd..0000000000 --- a/vendor/google.golang.org/grpc/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 7d7a3056b7..c2fdd58b31 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -69,7 +69,8 @@ type writerSink struct { func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
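writerSink.Write above emits each binary-log entry as a 4-byte big-endian length followed by the marshaled proto. The framing in isolation, with plain byte slices standing in for serialized log entries:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frame prefixes msg with its length, exactly as writerSink.Write does
// before handing both pieces to the underlying writer.
func frame(msg []byte) []byte {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
	return append(hdr, msg...)
}

func main() {
	var buf bytes.Buffer
	buf.Write(frame([]byte("entry-1")))
	buf.Write(frame([]byte("entry-2")))
	fmt.Printf("% x\n", buf.Bytes())
}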
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f731413930..6d5760d951 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -630,7 +630,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go index 692dd61817..1b1c4cce34 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go index 19c2fc521d..8b06eed1ab 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index fdf409d55d..8d194e44e1 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 8864a08111..837ddc4024 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go index be70b6cdfc..25ade62305 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go index f499a614c2..2919632d65 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go index 55664fa46b..f792fd22ca 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/util.go +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go index 6789a4cf2e..6ef43cc89f 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go +++ b/vendor/google.golang.org/grpc/internal/credentials/xds/handshake_info.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" ) @@ -66,8 +66,8 @@ type HandshakeInfo struct { mu sync.Mutex rootProvider certprovider.Provider identityProvider certprovider.Provider - sanMatchers []xdsinternal.StringMatcher // Only on the client side. - requireClientCert bool // Only on server side. + sanMatchers []matcher.StringMatcher // Only on the client side. + requireClientCert bool // Only on server side. } // SetRootCertProvider updates the root certificate provider. @@ -85,7 +85,7 @@ func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) } // SetSANMatchers updates the list of SAN matchers. -func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []xdsinternal.StringMatcher) { +func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []matcher.StringMatcher) { hi.mu.Lock() hi.sanMatchers = sanMatchers hi.mu.Unlock() @@ -113,10 +113,10 @@ func (hi *HandshakeInfo) UseFallbackCreds() bool { // GetSANMatchersForTesting returns the SAN matchers stored in HandshakeInfo. // To be used only for testing purposes. -func (hi *HandshakeInfo) GetSANMatchersForTesting() []xdsinternal.StringMatcher { +func (hi *HandshakeInfo) GetSANMatchersForTesting() []matcher.StringMatcher { hi.mu.Lock() defer hi.mu.Unlock() - return append([]xdsinternal.StringMatcher{}, hi.sanMatchers...) 
+ return append([]matcher.StringMatcher{}, hi.sanMatchers...) } // ClientSideTLSConfig constructs a tls.Config to be used in a client-side diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 73931a94bc..e766ac04af 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -22,6 +22,8 @@ package envconfig import ( "os" "strings" + + xdsenv "google.golang.org/grpc/internal/xds/env" ) const ( @@ -31,8 +33,8 @@ const ( ) var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on" or if XDS retry support is enabled. + Retry = strings.EqualFold(os.Getenv(retryStr), "on") || xdsenv.RetrySupport // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 200b115ca2..740f83c2b7 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -31,26 +31,37 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() } diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 0000000000..0177af4b51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. 
+package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index 5e7f36703d..be7e13d585 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -117,9 +117,12 @@ type ClientInterceptor interface { NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) } -// ServerInterceptor is unimplementable; do not use. +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. type ServerInterceptor interface { - notDefined() + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. } type csKeyType string diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 03825bbe7b..75301c5149 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { +func handleDNSError(err error, lookupType string) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). 
return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -323,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go deleted file mode 100644 index 8783a8cf82..0000000000 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package dns - -import "net" - -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err - } -} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index c0634d152c..badbdbf597 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. 
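To make the consolidated handleDNSError rule above concrete: only timeouts and temporary failures propagate, so gRPC retries the lookup with backoff, while everything else, such as a missing TXT record, is suppressed. A standalone check mirroring that predicate; the sample errors are fabricated for illustration.

package main

import (
	"errors"
	"fmt"
	"net"
)

// suppress mirrors handleDNSError: report true when the error should be
// swallowed rather than surfaced to gRPC.
func suppress(err error) bool {
	var dnsErr *net.DNSError
	return errors.As(err, &dnsErr) && !dnsErr.IsTimeout && !dnsErr.IsTemporary
}

func main() {
	fmt.Println(suppress(&net.DNSError{Err: "no such host", IsNotFound: true})) // true: treated as absence
	fmt.Println(suppress(&net.DNSError{Err: "i/o timeout", IsTimeout: true}))   // false: retried with backoff
}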
-		return fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+		return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
 }
 
 // MethodConfig defines the configuration recommended by the service providers for a
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 710223b8de..e5c6513edd 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -97,7 +97,7 @@ func (s *Status) Err() error {
 	if s.Code() == codes.OK {
 		return nil
 	}
-	return &Error{e: s.Proto()}
+	return &Error{s: s}
 }
 
 // WithDetails returns a new status with the provided details messages appended to the status.
@@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} {
 	return details
 }
 
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
 // Error wraps a pointer of a status proto. It implements error and Status,
 // and a nil *Error should never be returned by this package.
 type Error struct {
-	e *spb.Status
+	s *Status
 }
 
 func (e *Error) Error() string {
-	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage())
+	return e.s.String()
 }
 
 // GRPCStatus returns the Status represented by se.
 func (e *Error) GRPCStatus() *Status {
-	return FromProto(e.e)
+	return e.s
 }
 
 // Is implements future error.Is functionality.
@@ -158,5 +162,5 @@ func (e *Error) Is(target error) bool {
 	if !ok {
 		return false
 	}
-	return proto.Equal(e.e, tse.e)
+	return proto.Equal(e.s.s, tse.s.s)
 }
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 4b2964f2a1..b3a72276de 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 7913ef1dbf..999f52cd75 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
@@ -35,41 +36,41 @@ var logger = grpclog.Component("core")
 
 func log() {
 	once.Do(func() {
-		logger.Info("CPU time info is unavailable on non-linux or appengine environment.")
+		logger.Info("CPU time info is unavailable on non-linux environments.")
 	})
 }
 
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
 func GetCPUTime() int64 {
 	log()
 	return 0
 }
 
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
 type Rusage struct{}
 
-// GetRusage is a no-op function under non-linux or appengine environment.
+// GetRusage is a no-op function under non-linux environments.
 func GetRusage() *Rusage {
 	log()
 	return nil
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs.
It a no-op function for non-linux or appengine environment.
+// between two Rusage structs. It is a no-op function for non-linux environments.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
+// SetTCPUserTimeout is a no-op function under non-linux environments.
 func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
 	log()
 	return nil
 }
 
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
 func GetTCPUserTimeout(conn net.Conn) (int, error) {
 	log()
 	return -1, nil
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index f63a013762..45532f8aea 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -296,7 +296,7 @@ type controlBuffer struct {
 	// closed and nilled when transportResponseFrames drops below the
 	// threshold. Both fields are protected by mu.
 	transportResponseFrames int
-	trfChan                 atomic.Value // *chan struct{}
+	trfChan                 atomic.Value // chan struct{}
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
@@ -310,10 +310,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer {
 // throttle blocks if there are too many incomingSettings/cleanupStreams in the
 // controlbuf.
 func (c *controlBuffer) throttle() {
-	ch, _ := c.trfChan.Load().(*chan struct{})
+	ch, _ := c.trfChan.Load().(chan struct{})
 	if ch != nil {
 		select {
-		case <-*ch:
+		case <-ch:
 		case <-c.done:
 		}
 	}
@@ -347,8 +347,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
 		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
 			// We are adding the frame that puts us over the threshold; create
 			// a throttling channel.
-			ch := make(chan struct{})
-			c.trfChan.Store(&ch)
+			c.trfChan.Store(make(chan struct{}))
 		}
 	}
 	c.mu.Unlock()
@@ -389,9 +388,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
 			if c.transportResponseFrames == maxQueuedTransportResponseFrames {
 				// We are removing the frame that put us over the
 				// threshold; close and clear the throttling channel.
-				ch := c.trfChan.Load().(*chan struct{})
-				close(*ch)
-				c.trfChan.Store((*chan struct{})(nil))
+				ch := c.trfChan.Load().(chan struct{})
+				close(ch)
+				c.trfChan.Store((chan struct{})(nil))
 			}
 			c.transportResponseFrames--
 		}
@@ -407,7 +406,6 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
 		select {
 		case <-c.ch:
 		case <-c.done:
-			c.finish()
 			return nil, ErrConnClosing
 		}
 	}
@@ -432,6 +430,14 @@ func (c *controlBuffer) finish() {
 			hdr.onOrphaned(ErrConnClosing)
 		}
 	}
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
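Two things changed in controlbuf above: the throttling channel is now stored in the atomic.Value directly (a channel is already a reference, so the extra pointer bought nothing), and finish() closes it so a goroutine parked in throttle() cannot outlive the transport. A compact stand-in for that gate pattern; gate and its methods are illustrative names, not the internal API.

package main

import (
	"fmt"
	"sync/atomic"
)

// gate imitates trfChan: arm installs a channel, wait blocks on it if one
// is armed, and release closes it to free any blocked waiters.
type gate struct{ ch atomic.Value }

func (g *gate) arm() { g.ch.Store(make(chan struct{})) }

func (g *gate) wait() {
	if ch, _ := g.ch.Load().(chan struct{}); ch != nil {
		<-ch
	}
}

func (g *gate) release() {
	if ch, _ := g.ch.Load().(chan struct{}); ch != nil {
		close(ch)
	}
	g.ch.Store((chan struct{})(nil))
}

func main() {
	g := &gate{}
	g.arm()
	unblocked := make(chan struct{})
	go func() {
		g.wait()
		close(unblocked)
	}()
	g.release()
	<-unblocked
	fmt.Println("waiter released")
}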
+ ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 05d3871e62..1c3459c2b4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -141,9 +141,8 @@ type serverHandlerTransport struct { stats stats.Handler } -func (ht *serverHandlerTransport) Close() error { +func (ht *serverHandlerTransport) Close() { ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 48c5e52eda..7558630743 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -24,6 +24,7 @@ import ( "io" "math" "net" + "net/http" "strconv" "strings" "sync" @@ -241,7 +242,15 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // and passed to the credential handshaker. This makes it possible for // address specific arbitrary data to reach the credential handshaker. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + rawConn := conn + // Pull the deadline from the connectCtx, which will be used for + // timeouts in the authentication protocol handshake. Can ignore the + // boolean as the deadline will return the zero value, which will make + // the conn not timeout on I/O operations. + deadline, _ := connectCtx.Deadline() + rawConn.SetDeadline(deadline) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) + rawConn.SetDeadline(time.Time{}) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -399,11 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) } } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + // Do not close the transport. Let reader goroutine handle it since + // there might be data in the buffers. + t.conn.Close() + t.controlBuf.finish() close(t.writerDone) }() return t, nil @@ -608,26 +616,35 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. In this case there is no reason to retry at all, as it is +// assumed the RPC would continue to fail on subsequent attempts. +// 2. If the credentials errored when requesting their headers. 
In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + DoNotRetry bool + DoNotTransparentRetry bool } -// Error implements error. -func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. +// streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -727,23 +744,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, err + return nil, &NewStreamError{Err: err} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing} } } if t.statsHandler != nil { @@ -878,12 +895,18 @@ func (t *http2Client) Close(err error) { // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status if len(goAwayDebugMessage) > 0 { - err = fmt.Errorf("closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. 
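NewStreamError above replaces the removed PerformedIOError wrapper: instead of one sentinel type per situation, the root cause travels together with explicit retry hints. The shape of that pattern in miniature; streamError and its fields here are illustrative, not the internal API.

package main

import (
	"errors"
	"fmt"
)

// streamError carries the cause plus retry hints, so callers branch on
// flags rather than on concrete error types or message text.
type streamError struct {
	err                   error
	doNotRetry            bool
	doNotTransparentRetry bool
}

func (e *streamError) Error() string { return e.err.Error() }
func (e *streamError) Unwrap() error { return e.err }

func main() {
	err := error(&streamError{
		err:                   errors.New("per-RPC credentials failed"),
		doNotTransparentRetry: true,
	})
	var se *streamError
	if errors.As(err, &se) && se.doNotTransparentRetry {
		fmt.Println("credentials may have done I/O; skip transparent retry:", se)
	}
}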
for _, s := range streams { - t.closeStream(s, err, false, http2.ErrCodeNo, status.New(codes.Unavailable, err.Error()), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -1221,7 +1244,11 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } - t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %v", f.ErrCode, string(f.DebugData())) + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { @@ -1254,11 +1281,124 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + 
httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } @@ -1293,9 +1433,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1308,9 +1448,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } // reader runs as a separate goroutine in charge of reading data from network diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 11be5599cd..19c13e041d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -102,11 +102,11 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. + // drainChan is initialized when Drain() is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is // already underway. 
 	drainChan     chan struct{}
 	state         transportState
@@ -125,9 +125,30 @@
 	connectionID uint64
 }
 
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates an http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
 	writeBufSize := config.WriteBufferSize
 	readBufSize := config.ReadBufferSize
 	maxHeaderListSize := defaultServerMaxHeaderListSize
@@ -210,14 +231,15 @@
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
+	done := make(chan struct{})
 	t := &http2Server{
-		ctx:               context.Background(),
+		ctx:               setConnection(context.Background(), rawConn),
 		done:              done,
 		conn:              conn,
 		remoteAddr:        conn.RemoteAddr(),
 		localAddr:         conn.LocalAddr(),
-		authInfo:          config.AuthInfo,
+		authInfo:          authInfo,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
 		writerDone:        make(chan struct{}),
@@ -266,6 +288,13 @@
 	// Check the validity of client preface.
 	preface := make([]byte, len(clientPreface))
 	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		// In deployments where a gRPC server runs behind a cloud load balancer
+		// which performs regular TCP level health checks, the connection is
+		// closed immediately by the latter. Skipping the error here will help
+		// reduce log clutter.
+		if err == io.EOF {
+			return nil, nil
+		}
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
 	}
 	if !bytes.Equal(preface, clientPreface) {
@@ -295,6 +324,7 @@
 			}
 		}
 		t.conn.Close()
+		t.controlBuf.finish()
 		close(t.writerDone)
 	}()
 	go t.keepalive()
@@ -304,37 +334,92 @@
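With the contract stated above, a caller of NewServerTransport has three outcomes to handle. A schematic accept-loop fragment follows; the placeholder types stand in for the internal transport package, which is not importable from user code.

package main

import (
	"log"
	"net"
)

// serverTransport and newServerTransport are placeholders standing in for
// transport.ServerTransport and transport.NewServerTransport.
type serverTransport interface{ Close() }

func newServerTransport(conn net.Conn) (serverTransport, error) { return nil, nil }

// handleRawConn sketches the three outcomes: (st, nil) means serve it;
// (nil, err) means log and drop; (nil, nil) means the conn closed before
// the client preface arrived (for example a TCP health probe), so drop
// it without logging.
func handleRawConn(conn net.Conn) {
	st, err := newServerTransport(conn)
	if err != nil {
		log.Printf("handshake failed: %v", err)
		conn.Close()
		return
	}
	if st == nil {
		conn.Close()
		return
	}
	defer st.Close()
	// ... serve streams on st ...
}

func main() {}

// operateHeader takes action on the decoded headers.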
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) return false } buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = false + mdata = make(map[string][]string) + httpMethod string + // headerError is set if an error is encountered while parsing the headers + headerError bool + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = true + } + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = true + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } } + + if !isGRPC || headerError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return false + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } @@ -347,14 +432,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
- if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } } t.mu.Lock() if t.state != reachable { @@ -383,10 +468,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( return true } t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { + if httpMethod != http.MethodPost { t.mu.Unlock() if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) } t.controlBuf.put(&cleanupStream{ streamID: streamID, @@ -399,7 +484,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: state.data.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() if logger.V(logLevel) { logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) @@ -437,7 +522,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: metadata.MD(mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -1004,12 +1089,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain() ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1063,11 +1148,11 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close() { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return } t.state = closing streams := t.activeStreams @@ -1075,7 +1160,9 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() + if err := t.conn.Close(); err != nil && logger.V(logLevel) { + logger.Infof("transport: error closing conn during Close: %v", err) + } if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } @@ -1087,7 +1174,6 @@ func (t *http2Server) Close() error { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. 
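Editor's note: the http2_server.go hunks above change the transport construction contract in three ways that callers must handle together: NewServerTransport now performs the credentials handshake itself, it returns (nil, nil) when the conn dies before the client preface, and Close no longer returns an error. Below is a minimal sketch of a caller honoring that contract; transport is a gRPC-internal package, so the serveConn helper is purely illustrative and cannot actually be compiled outside the grpc module.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc/internal/transport"
)

// serveConn is a hypothetical accept-loop helper showing the three
// possible outcomes of the NewServerTransport call added above.
func serveConn(rawConn net.Conn, cfg *transport.ServerConfig) {
	st, err := transport.NewServerTransport(rawConn, cfg)
	if err != nil {
		// Failure: nil transport and a non-nil error (for example a failed
		// TLS handshake). credentials.ErrConnDispatched conns must be left
		// open; everything else may be closed by the caller.
		log.Printf("NewServerTransport(%q) failed: %v", rawConn.RemoteAddr(), err)
		return
	}
	if st == nil {
		// Special case: nil transport and nil error. The conn was closed
		// before the client preface arrived, typically by a load balancer's
		// TCP health check, and is deliberately not logged.
		return
	}
	// Success: hand st to the stream handler loop. Note that after this
	// diff st.Close() returns nothing instead of an error.
	defer st.Close()
	// ... st.HandleStreams(...) ...
}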
@@ -1152,17 +1238,13 @@ func (t *http2Server) RemoteAddr() net.Addr { } func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1280,3 +1362,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index c7dee140cf..d8247bcdf6 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -39,7 +39,6 @@ import ( spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -96,53 +95,6 @@ var ( logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. 
frame once decodeHeader function has been invoked and returned.
- data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -180,14 +132,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. - d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,168 +161,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
-func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? - d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err != nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode 
metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go index 96967428b5..7bb53cff10 100644 --- a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. package networktype import ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6cc1031fd9..d3bf65b2bd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -30,6 +30,7 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -518,7 +519,8 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters @@ -532,12 +534,6 @@ type ServerConfig struct { HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -694,7 +690,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close() // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap.go deleted file mode 100644 index 97ec8e1720..0000000000 --- a/vendor/google.golang.org/grpc/internal/xds/bootstrap.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package xds - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/xds/env" -) - -var logger = grpclog.Component("internal/xds") - -// TransportAPI refers to the API version for xDS transport protocol. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - -// BootstrapOptions wraps the parameters passed to SetupBootstrapFile. -type BootstrapOptions struct { - // Version is the xDS transport protocol version. - Version TransportAPI - // NodeID is the node identifier of the gRPC client/server node in the - // proxyless service mesh. - NodeID string - // ServerURI is the address of the management server. - ServerURI string - // ServerListenerResourceNameTemplate is the Listener resource name to fetch. - ServerListenerResourceNameTemplate string - // CertificateProviders is the certificate providers configuration. - CertificateProviders map[string]json.RawMessage -} - -// SetupBootstrapFile creates a temporary file with bootstrap contents, based on -// the passed in options, and updates the bootstrap environment variable to -// point to this file. -// -// Returns a cleanup function which will be non-nil if the setup process was -// completed successfully. It is the responsibility of the caller to invoke the -// cleanup function at the end of the test. -func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { - f, err := ioutil.TempFile("", "test_xds_bootstrap_*") - if err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - - cfg := &bootstrapConfig{ - XdsServers: []server{ - { - ServerURI: opts.ServerURI, - ChannelCreds: []creds{ - { - Type: "insecure", - }, - }, - }, - }, - Node: node{ - ID: opts.NodeID, - }, - CertificateProviders: opts.CertificateProviders, - ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, - } - switch opts.Version { - case TransportV2: - // TODO: Add any v2 specific fields. 
- case TransportV3: - cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") - default: - return nil, fmt.Errorf("unsupported xDS transport protocol version: %v", opts.Version) - } - - bootstrapContents, err := json.MarshalIndent(cfg, "", " ") - if err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - if err := ioutil.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) - - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = f.Name() - return func() { - os.Remove(f.Name()) - env.BootstrapFileName = origBootstrapFileName - }, nil -} - -type bootstrapConfig struct { - XdsServers []server `json:"xds_servers,omitempty"` - Node node `json:"node,omitempty"` - CertificateProviders map[string]json.RawMessage `json:"certificate_providers,omitempty"` - ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` -} - -type server struct { - ServerURI string `json:"server_uri,omitempty"` - ChannelCreds []creds `json:"channel_creds,omitempty"` - ServerFeatures []string `json:"server_features,omitempty"` -} - -type creds struct { - Type string `json:"type,omitempty"` - Config interface{} `json:"config,omitempty"` -} - -type node struct { - ID string `json:"id,omitempty"` -} diff --git a/vendor/google.golang.org/grpc/internal/xds/env/env.go b/vendor/google.golang.org/grpc/internal/xds/env/env.go index db9ac93b96..b171ac91f1 100644 --- a/vendor/google.golang.org/grpc/internal/xds/env/env.go +++ b/vendor/google.golang.org/grpc/internal/xds/env/env.go @@ -39,11 +39,11 @@ const ( // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - circuitBreakingSupportEnv = "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" - timeoutSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" - faultInjectionSupportEnv = "GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION" + ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + retrySupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY" + rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RBAC" c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" @@ -52,35 +52,27 @@ const ( var ( // BootstrapFileName holds the name of the file which contains xDS bootstrap // configuration. Users can specify the location of the bootstrap file by - // setting the environment variable "GRPC_XDS_BOOSTRAP". + // setting the environment variable "GRPC_XDS_BOOTSTRAP". // // When both bootstrap FileName and FileContent are set, FileName is used. BootstrapFileName = os.Getenv(BootstrapFileNameEnv) // BootstrapFileContent holds the content of the xDS bootstrap // configuration. Users can specify the bootstrap config by - // setting the environment variable "GRPC_XDS_BOOSTRAP_CONFIG". + // setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". // // When both bootstrap FileName and FileContent are set, FileName is used. 
BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - - // CircuitBreakingSupport indicates whether circuit breaking support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" to "false". - CircuitBreakingSupport = !strings.EqualFold(os.Getenv(circuitBreakingSupportEnv), "false") - // TimeoutSupport indicates whether support for max_stream_duration in - // route actions is enabled. This can be disabled by setting the - // environment variable "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" to "false". - TimeoutSupport = !strings.EqualFold(os.Getenv(timeoutSupportEnv), "false") - // FaultInjectionSupport is used to control both fault injection and HTTP - // filter support. - FaultInjectionSupport = !strings.EqualFold(os.Getenv(faultInjectionSupportEnv), "false") + // RingHashSupport indicates whether ring hash support is enabled, which can + // be disabled by setting the environment variable + // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") // ClientSideSecuritySupport is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") + ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") // AggregateAndDNSSupportEnv indicates whether processing of aggregated // cluster and DNS cluster is enabled, which can be enabled by setting the // environment variable @@ -88,6 +80,12 @@ var ( // "true". AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true") + // RetrySupport indicates whether xDS retry is enabled. + RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false") + + // RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled. + RBACSupport = strings.EqualFold(os.Getenv(rbacSupportEnv), "true") + // C2PResolverSupport indicates whether support for C2P resolver is enabled. // This can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". diff --git a/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go new file mode 100644 index 0000000000..35a22adadc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/matcher_header.go @@ -0,0 +1,253 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package matcher + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/metadata" +) + +// HeaderMatcher is an interface for header matchers. These are +// documented in (EnvoyProxy link here?). 
These matchers will match on different +// aspects of HTTP header name/value pairs. +type HeaderMatcher interface { + Match(metadata.MD) bool + String() string +} + +// mdValuesFromOutgoingCtx retrieves metadata from context. If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { + vs, ok := md[key] + if !ok { + return "", false + } + return strings.Join(vs, ","), true +} + +// HeaderExactMatcher matches on an exact match of the value of the header. +type HeaderExactMatcher struct { + key string + exact string +} + +// NewHeaderExactMatcher returns a new HeaderExactMatcher. +func NewHeaderExactMatcher(key, exact string) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderExactMatcher. +func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hem.key) + if !ok { + return false + } + return v == hem.exact +} + +func (hem *HeaderExactMatcher) String() string { + return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) +} + +// HeaderRegexMatcher matches on whether the entire request header value matches +// the regex. +type HeaderRegexMatcher struct { + key string + re *regexp.Regexp +} + +// NewHeaderRegexMatcher returns a new HeaderRegexMatcher. +func NewHeaderRegexMatcher(key string, re *regexp.Regexp) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRegexMatcher. +func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + return hrm.re.MatchString(v) +} + +func (hrm *HeaderRegexMatcher) String() string { + return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) +} + +// HeaderRangeMatcher matches on whether the request header value is within the +// range. The header value must be an integer in base 10 notation. +type HeaderRangeMatcher struct { + key string + start, end int64 // represents [start, end). +} + +// NewHeaderRangeMatcher returns a new HeaderRangeMatcher. +func NewHeaderRangeMatcher(key string, start, end int64) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRangeMatcher. +func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { + return true + } + return false +} + +func (hrm *HeaderRangeMatcher) String() string { + return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) +} + +// HeaderPresentMatcher will match based on whether the header is present in the +// whole request. +type HeaderPresentMatcher struct { + key string + present bool +} + +// NewHeaderPresentMatcher returns a new HeaderPresentMatcher. +func NewHeaderPresentMatcher(key string, present bool) *HeaderPresentMatcher { + return &HeaderPresentMatcher{key: key, present: present} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPresentMatcher. 
+func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) + present := ok && len(vs) > 0 + return present == hpm.present +} + +func (hpm *HeaderPresentMatcher) String() string { + return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) +} + +// HeaderPrefixMatcher matches on whether the prefix of the header value matches +// the prefix passed into this struct. +type HeaderPrefixMatcher struct { + key string + prefix string +} + +// NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. +func NewHeaderPrefixMatcher(key string, prefix string) *HeaderPrefixMatcher { + return &HeaderPrefixMatcher{key: key, prefix: prefix} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPrefixMatcher. +func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hpm.key) + if !ok { + return false + } + return strings.HasPrefix(v, hpm.prefix) +} + +func (hpm *HeaderPrefixMatcher) String() string { + return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) +} + +// HeaderSuffixMatcher matches on whether the suffix of the header value matches +// the suffix passed into this struct. +type HeaderSuffixMatcher struct { + key string + suffix string +} + +// NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher. +func NewHeaderSuffixMatcher(key string, suffix string) *HeaderSuffixMatcher { + return &HeaderSuffixMatcher{key: key, suffix: suffix} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderSuffixMatcher. +func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return strings.HasSuffix(v, hsm.suffix) +} + +func (hsm *HeaderSuffixMatcher) String() string { + return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) +} + +// HeaderContainsMatcher matches on whether the header value contains the +// value passed into this struct. +type HeaderContainsMatcher struct { + key string + contains string +} + +// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP +// Header key to match on, and contains is the value that the header should +// contain for a successful match. An empty contains string does not +// work, use HeaderPresentMatcher in that case. +func NewHeaderContainsMatcher(key string, contains string) *HeaderContainsMatcher { + return &HeaderContainsMatcher{key: key, contains: contains} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderContainsMatcher. +func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hcm.key) + if !ok { + return false + } + return strings.Contains(v, hcm.contains) +} + +func (hcm *HeaderContainsMatcher) String() string { + return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) +} + +// InvertMatcher inverts the match result of the underlying header matcher. +type InvertMatcher struct { + m HeaderMatcher +} + +// NewInvertMatcher returns a new InvertMatcher. +func NewInvertMatcher(m HeaderMatcher) *InvertMatcher { + return &InvertMatcher{m: m} +} + +// Match returns whether the passed in HTTP Headers match according to the +// InvertMatcher.
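+// For illustration (editor's example, not part of the vendored file):
+// NewInvertMatcher(NewHeaderExactMatcher("env", "prod")).Match(md) reports
+// true for any md whose "env" value is not exactly "prod", including md
+// that lacks the key entirely.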
+func (i *InvertMatcher) Match(md metadata.MD) bool { + return !i.m.Match(md) +} + +func (i *InvertMatcher) String() string { + return fmt.Sprintf("invert{%s}", i.m) +} diff --git a/vendor/google.golang.org/grpc/internal/xds/string_matcher.go b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go similarity index 98% rename from vendor/google.golang.org/grpc/internal/xds/string_matcher.go rename to vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go index 21f15aad1b..d7df6a1e2b 100644 --- a/vendor/google.golang.org/grpc/internal/xds/string_matcher.go +++ b/vendor/google.golang.org/grpc/internal/xds/matcher/string_matcher.go @@ -16,9 +16,9 @@ * */ -// Package xds contains types that need to be shared between code under +// Package matcher contains types that need to be shared between code under // google.golang.org/grpc/xds/... and the rest of gRPC. -package xds +package matcher import ( "errors" diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index e4cbea9174..3604c7819f 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -93,12 +93,16 @@ func (md MD) Copy() MD { } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -107,7 +111,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -116,9 +123,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -145,8 +160,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. 
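+//
+// For example (editor's illustration, not part of the vendored file):
+// ctx = metadata.AppendToOutgoingContext(ctx, "k1", "v1", "k1", "v2")
+// attaches "k1" with values ["v1", "v2"] to RPCs issued on ctx.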
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -159,20 +174,34 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := MD{} + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = v + } + return out, true } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). // -// This is intended for gRPC-internal use ONLY. +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -182,16 +211,23 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - out := raw.md.Copy() + out := MD{} + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) + out[key] = v + } for _, added := range raw.added { if len(added)%2 == 1 { panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a58174b6f4..0878ada9db 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -147,7 +147,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. logger.Error("subconn returned from pick is not *acBalancerWrapper") continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index b858c2a5e6..f194d14a08 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -107,10 +107,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S } switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: + case connectivity.Ready: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, @@ -122,6 +124,12 @@ func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.S func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.state == connectivity.Idle { + b.sc.Connect() + } +} + type picker struct { result balancer.PickResult err error @@ -131,6 +139,17 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + sc balancer.SubConn +} + +func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + i.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index 3679d702ab..f6e7b5ae35 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -27,7 +27,9 @@ import ( // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, scheme: scheme, } } @@ -35,11 +37,17 @@ func NewBuilderWithScheme(scheme string) *Resolver { // Resolver is also a resolver builder. // It's build() function always returns itself. type Resolver struct { + // BuildCallback is called when the Build method is called. 
Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. ResolveNowCallback func(resolver.ResolveNowOptions) - scheme string + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string // Fields actually belong to the resolver. CC resolver.ClientConn @@ -54,6 +62,7 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.CC = cc if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) @@ -72,9 +81,16 @@ func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { } // Close is a noop for Resolver. -func (*Resolver) Close() {} +func (r *Resolver) Close() { + r.CloseCallback() +} // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.CC.UpdateState(s) } + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.CC.ReportError(err) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 4118de571a..2c47cd54f0 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -39,6 +39,8 @@ type ccResolverWrapper struct { resolver resolver.Resolver done *grpcsync.Event curState resolver.State + + incomingMu sync.Mutex // Synchronizes all the incoming calls. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and @@ -90,6 +92,8 @@ func (ccr *ccResolverWrapper) close() { } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return nil } @@ -105,6 +109,8 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { } func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -114,6 +120,8 @@ func (ccr *ccResolverWrapper) ReportError(err error) { // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } @@ -128,6 +136,8 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.incomingMu.Lock() + defer ccr.incomingMu.Unlock() if ccr.done.HasFired() { return } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 6db356fa56..87987a2e65 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. 
connections or unreachable servers.
If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if @@ -828,26 +829,28 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err } + return status.Error(codes.Unknown, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0a151dee4f..557f29559d 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -710,13 +710,6 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener channelzID int64 @@ -839,28 +832,14 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. - if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) if !s.addConn(lisAddr, st) { return } @@ -881,10 +860,11 @@ func (s *Server) drainServerTransports(addr string) { // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, StatsHandler: s.opts.statsHandler, KeepaliveParams: s.opts.keepaliveParams, @@ -897,13 +877,22 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -1109,22 +1098,24 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler -func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(ctx context.Context, req interface{}) (interface{}, error) { - return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + var i int + var next UnaryHandler + next = func(ctx context.Context, req interface{}) (interface{}, error) { + if i == len(interceptors)-1 { + return interceptors[i](ctx, req, info, handler) + } + i++ + return interceptors[i-1](ctx, req, info, next) + } + return next(ctx, req) } } @@ -1138,7 +1129,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1390,22 +1383,24 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler -func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { - if curr == len(interceptors)-1 { - return finalHandler - } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + var i int + var next StreamHandler + next = func(srv interface{}, ss ServerStream) error { + if i == len(interceptors)-1 { + return interceptors[i](srv, ss, info, handler) + } + i++ + return interceptors[i-1](srv, ss, info, next) + } + return next(srv, ss) } } @@ -1418,7 +1413,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if sh != nil { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1521,6 +1518,8 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1588,7 +1587,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 63e476ee7f..0285dcc6a2 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+ IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 1f3e70d2c4..625d47b34e 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -274,33 +274,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -314,7 +287,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } @@ -323,9 +295,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. - if err := cs.newAttemptLocked(sh, trInfo); err != nil { + if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { cs.finish(err) return nil, err } @@ -373,8 +343,43 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // newAttemptLocked creates a new attempt with a transport. // If it succeeds, then it replaces clientStream's attempt with this new attempt. 
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { +func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + sh := cs.cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + } + newAttempt := &csAttempt{ + ctx: ctx, + beginTime: beginTime, cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, @@ -389,15 +394,14 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r } }() - if err := cs.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return toRPCErr(err) } - ctx := cs.ctx if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. - ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } @@ -417,14 +421,11 @@ func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (r func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. - return err - } - return toRPCErr(err) + // Return without converting to an RPC error so retry code can + // inspect. + return err } cs.attempt.s = s cs.attempt.p = &parser{r: s} @@ -445,8 +446,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -486,6 +486,7 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { + ctx context.Context cs *clientStream t transport.ClientTransport s *transport.Stream @@ -504,6 +505,7 @@ type csAttempt struct { trInfo *traceInfo statsHandler stats.Handler + beginTime time.Time } func (cs *clientStream) commitAttemptLocked() { @@ -521,46 +523,57 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. 
+func (cs *clientStream) shouldRetry(err error) (bool, error) { if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true + // Error from NewClientStream. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected, but assume no I/O was performed and the RPC is not + // fatal, so retry indefinitely. + return true, nil } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + + // Unwrap and convert error. + err = toRPCErr(nse.Err) + + // Never retry DoNotRetry errors, which indicate the RPC should not be + // retried due to max header list size violation, etc. + if nse.DoNotRetry { + return false, err + } + + // In the event of a non-IO operation error from NewStream, we never + // attempted to write anything to the wire, so we can retry + // indefinitely. + if !nse.DoNotTransparentRetry { + return true, nil } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. - return err + return false, err } // Wait for the trailers. + unprocessed := false if cs.attempt.s != nil { <-cs.attempt.s.Done() unprocessed = cs.attempt.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. - return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false if cs.attempt.s != nil { if !cs.attempt.s.TrailersOnly() { - return err + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback @@ -571,13 +584,13 @@ func (cs *clientStream) shouldRetry(err error) error { if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } @@ -590,16 +603,16 @@ func (cs *clientStream) shouldRetry(err error) error { rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -622,23 +635,24 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. 
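
Distilled from the hunk above, the NewStream branch of shouldRetry reduces to the decision below. newStreamError is a local mirror of transport.NewStreamError so the sketch needs no grpc internals; the real code additionally consults server pushback, throttling, and the method's retry policy.

    // newStreamError mirrors transport.NewStreamError from the hunk above.
    type newStreamError struct {
        Err                   error
        DoNotRetry            bool
        DoNotTransparentRetry bool
    }

    // classify is a simplified sketch: fatal errors abort immediately, and
    // failures known to have performed no I/O are retried transparently.
    func classify(nse *newStreamError) (transparent bool, err error) {
        if nse.DoNotRetry {
            return false, nse.Err // e.g. max header list size violation
        }
        if !nse.DoNotTransparentRetry {
            return true, nil // no I/O happened; safe to retry indefinitely
        }
        return false, nse.Err // real code falls through to policy-based retry
    }
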
func (cs *clientStream) retryLocked(lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + cs.attempt.finish(toRPCErr(lastErr)) + isTransparent, err := cs.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + if err := cs.newAttemptLocked(isTransparent); err != nil { return err } if lastErr = cs.replayBufferLocked(); lastErr == nil { @@ -659,7 +673,11 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) } a := cs.attempt cs.mu.Unlock() @@ -924,7 +942,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return io.EOF } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -972,7 +990,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { a.mu.Unlock() } if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1034,12 +1052,12 @@ func (a *csAttempt) finish(err error) { if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + a.statsHandler.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go index 168cdb8578..3f77f4876e 100644 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -21,6 +21,7 @@ package bufconn import ( + "context" "fmt" "io" "net" @@ -86,8 +87,17 @@ func (l *Listener) Addr() net.Addr { return addr{} } // providing it the server half of the connection, and returns the client half // of the connection. func (l *Listener) Dial() (net.Conn, error) { + return l.DialContext(context.Background()) +} + +// DialContext creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. If ctx is Done, returns ctx.Err() +func (l *Listener) DialContext(ctx context.Context) (net.Conn, error) { p1, p2 := newPipe(l.sz), newPipe(l.sz) select { + case <-ctx.Done(): + return nil, ctx.Err() case <-l.done: return nil, errClosed case l.ch <- &conn{p1, p2}: diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bfe5cf8870..48594bc246 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
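
Stepping back to the bufconn change a few hunks up: DialContext makes in-memory dials cancelable, which matters in tests where nothing ever calls Accept. A usage sketch; the buffer size and timeout are arbitrary.

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc/test/bufconn"
    )

    func dialWithDeadline() {
        lis := bufconn.Listen(1 << 20) // buffer size is arbitrary here

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        // Unlike Dial, this cannot block forever when nothing calls Accept:
        // once the deadline passes it returns ctx.Err().
        conn, err := lis.DialContext(ctx)
        if err != nil {
            log.Fatalf("bufconn dial: %v", err)
        }
        defer conn.Close()
    }
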
-const Version = "1.38.0" +const Version = "1.41.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 1a0dbd7ee5..d923187a7b 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -32,26 +32,14 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.14.0 @@ -101,10 +89,6 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - misspell -error . # - Check that generated proto files are up to date. diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go index 73b92e9443..c4477a55d1 100644 --- a/vendor/google.golang.org/grpc/xds/csds/csds.go +++ b/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -25,7 +25,6 @@ package csds import ( "context" - "fmt" "io" "time" @@ -38,49 +37,36 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient" "google.golang.org/protobuf/types/known/timestamppb" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register v2 xds_client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register v3 xds_client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client. ) -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. 
-type xdsClientInterface interface { - DumpLDS() (string, map[string]client.UpdateWithMD) - DumpRDS() (string, map[string]client.UpdateWithMD) - DumpCDS() (string, map[string]client.UpdateWithMD) - DumpEDS() (string, map[string]client.UpdateWithMD) - BootstrapConfig() *bootstrap.Config - Close() -} - var ( logger = grpclog.Component("xds") - newXDSClient = func() (xdsClientInterface, error) { - return client.New() + newXDSClient = func() xdsclient.XDSClient { + c, err := xdsclient.New() + if err != nil { + logger.Warningf("failed to create xds client: %v", err) + return nil + } + return c } ) // ClientStatusDiscoveryServer implementations interface ClientStatusDiscoveryServiceServer. type ClientStatusDiscoveryServer struct { - // xdsClient will always be the same in practise. But we keep a copy in each + // xdsClient will always be the same in practice. But we keep a copy in each // server instance for testing. - xdsClient xdsClientInterface + xdsClient xdsclient.XDSClient } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be // registered on a gRPC server. func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - xdsC, err := newXDSClient() - if err != nil { - return nil, fmt.Errorf("failed to create xds client: %v", err) - } - return &ClientStatusDiscoveryServer{ - xdsClient: xdsC, - }, nil + return &ClientStatusDiscoveryServer{xdsClient: newXDSClient()}, nil } // StreamClientStatus implementations interface ClientStatusDiscoveryServiceServer. @@ -109,10 +95,13 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * } // buildClientStatusRespForReq fetches the status from the client, and returns -// the response to be sent back to client. +// the response to be sent back to xdsclient. // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + if s.xdsClient == nil { + return &v3statuspb.ClientStatusResponse{}, nil + } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. if len(req.NodeMatchers) != 0 { @@ -137,7 +126,9 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp // Close cleans up the resources. func (s *ClientStatusDiscoveryServer) Close() { - s.xdsClient.Close() + if s.xdsClient != nil { + s.xdsClient.Close() + } } // nodeProtoToV3 converts the given proto into a v3.Node. 
n is from bootstrap @@ -173,7 +164,7 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node { func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsConfig { version, dump := s.xdsClient.DumpLDS() - var resources []*v3adminpb.ListenersConfigDump_DynamicListener + resources := make([]*v3adminpb.ListenersConfigDump_DynamicListener, 0, len(dump)) for name, d := range dump { configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ Name: name, @@ -207,7 +198,7 @@ func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsConfig { _, dump := s.xdsClient.DumpRDS() - var resources []*v3adminpb.RoutesConfigDump_DynamicRouteConfig + resources := make([]*v3adminpb.RoutesConfigDump_DynamicRouteConfig, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ VersionInfo: d.MD.Version, @@ -237,7 +228,7 @@ func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsConfig { version, dump := s.xdsClient.DumpCDS() - var resources []*v3adminpb.ClustersConfigDump_DynamicCluster + resources := make([]*v3adminpb.ClustersConfigDump_DynamicCluster, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ VersionInfo: d.MD.Version, @@ -268,7 +259,7 @@ func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsC func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsConfig { _, dump := s.xdsClient.DumpEDS() - var resources []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig + resources := make([]*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig, 0, len(dump)) for _, d := range dump { configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ VersionInfo: d.MD.Version, @@ -296,17 +287,17 @@ func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsC } } -func serviceStatusToProto(serviceStatus client.ServiceStatus) v3adminpb.ClientResourceStatus { +func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { - case client.ServiceStatusUnknown: + case xdsclient.ServiceStatusUnknown: return v3adminpb.ClientResourceStatus_UNKNOWN - case client.ServiceStatusRequested: + case xdsclient.ServiceStatusRequested: return v3adminpb.ClientResourceStatus_REQUESTED - case client.ServiceStatusNotExist: + case xdsclient.ServiceStatusNotExist: return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case client.ServiceStatusACKed: + case xdsclient.ServiceStatusACKed: return v3adminpb.ClientResourceStatus_ACKED - case client.ServiceStatusNACKed: + case xdsclient.ServiceStatusNACKed: return v3adminpb.ClientResourceStatus_NACKED default: return v3adminpb.ClientResourceStatus_UNKNOWN diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 4ccec4ec41..b9f1c71201 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -35,19 +35,20 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/resolver" _ 
"google.golang.org/grpc/xds" // To register xds resolvers and balancers. - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/structpb" ) const ( c2pScheme = "google-c2p" - tdURL = "directpath-trafficdirector.googleapis.com" + tdURL = "directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" @@ -61,15 +62,11 @@ const ( dnsName, xdsName = "dns", "xds" ) -type xdsClientInterface interface { - Close() -} - // For overriding in unittests. var ( onGCE = googlecloud.OnGCE - newClientWithConfig = func(config *bootstrap.Config) (xdsClientInterface, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, error) { return xdsclient.NewWithConfig(config) } @@ -138,7 +135,7 @@ func (c2pResolverBuilder) Scheme() string { type c2pResolver struct { resolver.Resolver - client xdsClientInterface + client xdsclient.XDSClient } func (r *c2pResolver) Close() { @@ -152,13 +149,15 @@ var ipv6EnabledMetadata = &structpb.Struct{ }, } +var id = fmt.Sprintf("C2P-%d", grpcrand.Int()) + // newNode makes a copy of defaultNode, and populate it's Metadata and // Locality fields. func newNode(zone string, ipv6Capable bool) *v3corepb.Node { ret := &v3corepb.Node{ // Not all required fields are set in defaultNote. Metadata will be set // if ipv6 is enabled. Locality will be set to the value from metadata. - Id: "C2P", + Id: id, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning}, diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go index 5883027a2c..86656736a6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go @@ -20,8 +20,10 @@ package balancer import ( - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer" // Register the EDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer + _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer ) diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/xds/internal/balancer/balancergroup/balancergroup.go index 
5b6d42a25e..5798b03ac5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/balancergroup/balancergroup.go @@ -24,7 +24,7 @@ import ( "time" orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -104,6 +104,22 @@ func (sbc *subBalancerWrapper) startBalancer() { } } +func (sbc *subBalancerWrapper) exitIdle() { + b := sbc.balancer + if b == nil { + return + } + if ei, ok := b.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc, b := range sbc.group.scToSubBalancer { + if b == sbc { + sc.Connect() + } + } +} + func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { b := sbc.balancer if b == nil { @@ -183,7 +199,7 @@ type BalancerGroup struct { cc balancer.ClientConn buildOpts balancer.BuildOptions logger *grpclog.PrefixLogger - loadStore load.PerClusterReporter + loadStore load.PerClusterReporter // TODO: delete this, no longer needed. It was used by EDS. // stateAggregator is where the state/picker updates will be sent to. It's // provided by the parent balancer, to build a picker with all the @@ -493,6 +509,17 @@ func (bg *BalancerGroup) Close() { bg.outgoingMu.Unlock() } +// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked. +// It will trigger this on all sub-balancers, or reconnect their subconns if +// not supported. +func (bg *BalancerGroup) ExitIdle() { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + config.exitIdle() + } + bg.outgoingMu.Unlock() +} + const ( serverLoadCPUName = "cpu_utilization" serverLoadMemoryName = "mem_utilization" diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index bf1519bb8c..82d2a96958 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -31,67 +31,57 @@ import ( xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/edsbalancer" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/xdsclient" ) const ( cdsName = "cds_experimental" - edsName = "eds_experimental" ) var ( errBalancerClosed = errors.New("cdsBalancer is closed") - // newEDSBalancer is a helper function to build a new edsBalancer and will be - // overridden in unittests. - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { - builder := balancer.Get(edsName) + // newChildBalancer is a helper function to build a new cluster_resolver + // balancer and will be overridden in unittests. 
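
The exitIdle/ExitIdle additions above follow one pattern: prefer the child's own balancer.ExitIdler implementation, otherwise reconnect its SubConns. Distilled below; child and subConns are caller-supplied placeholders.

    import "google.golang.org/grpc/balancer"

    func exitIdleOn(child balancer.Balancer, subConns []balancer.SubConn) {
        if ei, ok := child.(balancer.ExitIdler); ok {
            ei.ExitIdle() // the child policy knows how to leave IDLE itself
            return
        }
        for _, sc := range subConns {
            sc.Connect() // fallback: kick each SubConn to re-establish a transport
        }
    }
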
+ newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + builder := balancer.Get(clusterresolver.Name) if builder == nil { - return nil, fmt.Errorf("xds: no balancer builder with name %v", edsName) + return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) } - // We directly pass the parent clientConn to the - // underlying edsBalancer because the cdsBalancer does - // not deal with subConns. + // We directly pass the parent clientConn to the underlying + // cluster_resolver balancer because the cdsBalancer does not deal with + // subConns. return builder.Build(cc, opts), nil } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } buildProvider = buildProviderFunc ) func init() { - balancer.Register(cdsBB{}) + balancer.Register(bb{}) } -// cdsBB (short for cdsBalancerBuilder) implements the balancer.Builder -// interface to help build a cdsBalancer. +// bb implements the balancer.Builder interface to help build a cdsBalancer. // It also implements the balancer.ConfigParser interface to help parse the // JSON service config, to be passed to the cdsBalancer. -type cdsBB struct{} +type bb struct{} // Build creates a new CDS balancer with the ClientConn. -func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &cdsBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - cancelWatch: func() {}, // No-op at this point. - xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } b.logger = prefixLogger((b)) b.logger.Infof("Created") - - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsClient = client - var creds credentials.TransportCredentials switch { case opts.DialCreds != nil: @@ -103,7 +93,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. b.xdsCredsInUse = true } b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) - + b.clusterHandler = newClusterHandler(b) b.ccw = &ccWrapper{ ClientConn: cc, xdsHI: b.xdsHI, @@ -113,7 +103,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. } // Name returns the name of balancers built by this builder. -func (cdsBB) Name() string { +func (bb) Name() string { return cdsName } @@ -126,7 +116,7 @@ type lbConfig struct { // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. -func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg lbConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) @@ -134,52 +124,37 @@ func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return &cfg, nil } -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the cdsBalancer. This will be faked out in unittests. 
-type xdsClientInterface interface { - WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // ccUpdate wraps a clientConn update received from gRPC (pushed from the // xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS // watcher with the xdsClient, while a non-nil error causes it to cancel the -// existing watch and propagate the error to the underlying edsBalancer. +// existing watch and propagate the error to the underlying cluster_resolver +// balancer. type ccUpdate struct { clusterName string err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the edsBalancer. +// on to the cluster_resolver balancer. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState } -// watchUpdate wraps the information received from a registered CDS watcher. A -// non-nil error is propagated to the underlying edsBalancer. A valid update -// results in creating a new edsBalancer (if one doesn't already exist) and -// pushing the update to it. -type watchUpdate struct { - cds xdsclient.ClusterUpdate - err error -} +type exitIdle struct{} -// cdsBalancer implements a CDS based LB policy. It instantiates an EDS based -// LB policy to further resolve the serviceName received from CDS, into -// localities and endpoints. Implements the balancer.Balancer interface which -// is exposed to gRPC and implements the balancer.ClientConn interface which is -// exposed to the edsBalancer. +// cdsBalancer implements a CDS based LB policy. It instantiates a +// cluster_resolver balancer to further resolve the serviceName received from +// CDS, into localities and endpoints. Implements the balancer.Balancer +// interface which is exposed to gRPC and implements the balancer.ClientConn +// interface which is exposed to the cluster_resolver balancer. type cdsBalancer struct { ccw *ccWrapper // ClientConn interface passed to child LB. bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsClientInterface // xDS client to watch Cluster resource. - cancelWatch func() // Cluster watch cancel func. - edsLB balancer.Balancer // EDS child policy. - clusterToWatch string + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. + clusterHandler *clusterHandler // To watch the clusters. + childLB balancer.Balancer logger *grpclog.PrefixLogger closed *grpcsync.Event done *grpcsync.Event @@ -195,25 +170,15 @@ type cdsBalancer struct { // handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good // updates lead to registration of a CDS watch. Updates with error lead to // cancellation of existing watch and propagation of the same error to the -// edsBalancer. +// cluster_resolver balancer. func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // We first handle errors, if any, and then proceed with handling the // update, only if the status quo has changed. 
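
All of these handlers execute on the balancer's single run() goroutine (shown further down), fed by updateCh, which is why they can touch balancer state without locking. A minimal sketch of the pattern, with grpc's internal buffer.Unbounded replaced by a plain channel and stub update types:

    type (
        ccUpdateStub struct{ clusterName string }
        scUpdateStub struct{}
        exitIdleStub struct{}
    )

    func runLoop(updates <-chan interface{}, done <-chan struct{}) {
        for {
            select {
            case u := <-updates:
                switch u.(type) {
                case *ccUpdateStub: // register/refresh the CDS watch
                case *scUpdateStub: // pass through to the child balancer
                case exitIdleStub: // ask the child to leave IDLE
                }
            case <-done:
                return // cancel watches and close the child
            }
        }
    }
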
if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) - } - if b.clusterToWatch == update.clusterName { return } - if update.clusterName != "" { - cancelWatch := b.xdsClient.WatchCluster(update.clusterName, b.handleClusterUpdate) - b.logger.Infof("Watch started on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - b.cancelWatch = func() { - cancelWatch() - b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - } - b.clusterToWatch = update.clusterName - } + b.clusterHandler.updateRootCluster(update.clusterName) } // handleSecurityConfig processes the security configuration received from the @@ -305,22 +270,22 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc } // handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying edsBalancer. -func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { +// lead to clientConn updates being invoked on the underlying cluster_resolver balancer. +func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, update.cds) + b.logger.Infof("Watch update from xds-client %p, content: %+v, security config: %v", b.xdsClient, pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. We do this because the child // policy may try to create a new subConn inline. Processing the security // configuration here and setting up the handshakeInfo will make sure that // such attempts are handled properly. - if err := b.handleSecurityConfig(update.cds.SecurityCfg); err != nil { + if err := b.handleSecurityConfig(update.securityCfg); err != nil { // If the security config is invalid, for example, if the provider // instance is not found in the bootstrap config, we need to put the // channel in transient failure. @@ -330,33 +295,67 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { } // The first good update from the watch API leads to the instantiation of an - // edsBalancer. Further updates/errors are propagated to the existing - // edsBalancer. - if b.edsLB == nil { - edsLB, err := newEDSBalancer(b.ccw, b.bOpts) + // cluster_resolver balancer. Further updates/errors are propagated to the existing + // cluster_resolver balancer. 
+ if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) if err != nil { - b.logger.Errorf("Failed to create child policy of type %s, %v", edsName, err) + b.logger.Errorf("Failed to create child policy of type %s, %v", clusterresolver.Name, err) return } - b.edsLB = edsLB - b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) + for i, cu := range update.updates { + switch cu.ClusterType { + case xdsclient.ClusterTypeEDS: + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cu.ClusterName, + EDSServiceName: cu.EDSServiceName, + MaxConcurrentRequests: cu.MaxRequests, + } + if cu.EnableLRS { + // An empty string here indicates that the cluster_resolver balancer should use the + // same xDS server for load reporting as it does for EDS + // requests/responses. + dms[i].LoadReportingServerName = new(string) + + } + case xdsclient.ClusterTypeLogicalDNS: + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + DNSHostname: cu.DNSHostName, + } + default: + b.logger.Infof("unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) + } } - lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: update.cds.ServiceName, - MaxConcurrentRequests: update.cds.MaxRequests, + lbCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, } - if update.cds.EnableLRS { - // An empty string here indicates that the edsBalancer should use the - // same xDS server for load reporting as it does for EDS - // requests/responses. - lbCfg.LrsLoadReportingServerName = new(string) + // lbPolicy is set only when the policy is ringhash. The default (when it's + // not set) is roundrobin. And similarly, we only need to set XDSLBPolicy + // for ringhash (it also defaults to roundrobin). + if lbp := update.lbPolicy; lbp != nil { + lbCfg.XDSLBPolicy = &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{ + MinRingSize: lbp.MinimumRingSize, + MaxRingSize: lbp.MaximumRingSize, + }, + } } + ccState := balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), BalancerConfig: lbCfg, } - if err := b.edsLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("xds: edsBalancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("xds: cluster_resolver balancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) } } @@ -373,24 +372,33 @@ func (b *cdsBalancer) run() { b.handleClientConnUpdate(update) case *scUpdate: // SubConn updates are passthrough and are simply handed over to - // the underlying edsBalancer. - if b.edsLB == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no edsBalancer", update) + // the underlying cluster_resolver balancer. 
+ if b.childLB == nil { + b.logger.Errorf("xds: received scUpdate {%+v} with no cluster_resolver balancer", update) break } - b.edsLB.UpdateSubConnState(update.subConn, update.state) - case *watchUpdate: - b.handleWatchUpdate(update) + b.childLB.UpdateSubConnState(update.subConn, update.state) + case exitIdle: + if b.childLB == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } + case u := <-b.clusterHandler.updateChannel: + b.handleWatchUpdate(u) case <-b.closed.Done(): - b.cancelWatch() - b.cancelWatch = func() {} - - if b.edsLB != nil { - b.edsLB.Close() - b.edsLB = nil + b.clusterHandler.close() + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil } - b.xdsClient.Close() if b.cachedRoot != nil { b.cachedRoot.Close() } @@ -417,23 +425,22 @@ func (b *cdsBalancer) run() { // - If it's from xds client, it means CDS resource were removed. The CDS // watcher should keep watching. // -// In both cases, the error will be forwarded to EDS balancer. And if error is -// resource-not-found, the child EDS balancer will stop watching EDS. +// In both cases, the error will be forwarded to the child balancer. And if +// error is resource-not-found, the child balancer will stop watching EDS. func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - // TODO: connection errors will be sent to the eds balancers directly, and - // also forwarded by the parent balancers/resolvers. So the eds balancer may - // see the same error multiple times. We way want to only forward the error - // to eds if it's not a connection error. - // // This is not necessary today, because xds client never sends connection // errors. if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - b.cancelWatch() + b.clusterHandler.close() } - if b.edsLB != nil { - b.edsLB.ResolverError(err) + if b.childLB != nil { + if xdsclient.ErrType(err) != xdsclient.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.childLB.ResolverError(err) + } } else { - // If eds balancer was never created, fail the RPCs with + // If child balancer was never created, fail the RPCs with // errors. b.ccw.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, @@ -442,16 +449,6 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { } } -// handleClusterUpdate is the CDS watch API callback. It simply pushes the -// received information on to the update channel for run() to pick it up. -func (b *cdsBalancer) handleClusterUpdate(cu xdsclient.ClusterUpdate, err error) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received cluster update {%+v} after cdsBalancer was closed", cu) - return - } - b.updateCh.Put(&watchUpdate{cds: cu, err: err}) -} - // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
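
Rather than each policy constructing its own client, the xDS client now travels inside resolver.State: the resolver attaches it (see xdsclient.SetClient above) and balancers pull it out via xdsclient.FromResolverState (next hunk). The same round trip using grpc's public Attributes API; clientKey and xdsClientStub are placeholders, the real key and interface live in xds/internal/xdsclient.

    import "google.golang.org/grpc/resolver"

    type clientKey struct{} // placeholder for the private key type

    type xdsClientStub interface{ Close() } // stand-in for xdsclient.XDSClient

    func setClient(s resolver.State, c xdsClientStub) resolver.State {
        s.Attributes = s.Attributes.WithValues(clientKey{}, c)
        return s
    }

    func fromResolverState(s resolver.State) xdsClientStub {
        c, _ := s.Attributes.Value(clientKey{}).(xdsClientStub)
        return c // nil means the resolver did not attach a client
    }
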
@@ -461,7 +458,15 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro return errBalancerClosed } - b.logger.Infof("Received update from resolver, balancer config: %+v", state.BalancerConfig) + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(state.BalancerConfig)) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. @@ -503,6 +508,10 @@ func (b *cdsBalancer) Close() { <-b.done.Done() } +func (b *cdsBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at // creation and intercepts the NewSubConn() and UpdateAddresses() call from the // child policy to add security configuration required by xDS credentials. diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go new file mode 100644 index 0000000000..163a8c0a2e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -0,0 +1,318 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "errors" + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient" +) + +var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + +// clusterHandlerUpdate wraps the information received from the registered CDS +// watcher. A non-nil error is propagated to the underlying cluster_resolver +// balancer. A valid update results in creating a new cluster_resolver balancer +// (if one doesn't already exist) and pushing the update to it. +type clusterHandlerUpdate struct { + // securityCfg is the Security Config from the top (root) cluster. + securityCfg *xdsclient.SecurityConfig + // lbPolicy is the lb policy from the top (root) cluster. + // + // Currently, we only support roundrobin or ringhash, and since roundrobin + // does need configs, this is only set to the ringhash config, if the policy + // is ringhash. In the future, if we support more policies, we can make this + // an interface, and set it to config of the other policies. + lbPolicy *xdsclient.ClusterLBPolicyRingHash + + // updates is a list of ClusterUpdates from all the leaf clusters. + updates []xdsclient.ClusterUpdate + err error +} + +// clusterHandler will be given a name representing a cluster. It will then +// update the CDS policy constantly with a list of Clusters to pass down to +// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. 
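
updateChannel below has capacity one because only the newest update matters; writers therefore use a drain-then-send idiom (see constructClusterUpdate and handleResp). In isolation:

    // sendLatest empties the capacity-one channel if the reader has not yet
    // consumed the previous value, then sends, so the send never blocks.
    func sendLatest(ch chan clusterHandlerUpdate, u clusterHandlerUpdate) {
        select {
        case <-ch: // drop the stale, unread update
        default:
        }
        ch <- u
    }

Note the idiom is only race-free because every writer holds clusterMutex; with unsynchronized writers, two drains could interleave and a send could still block.
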
+type clusterHandler struct { + parent *cdsBalancer + + // A mutex to protect entire tree of clusters. + clusterMutex sync.Mutex + root *clusterNode + rootClusterName string + + // A way to ping CDS Balancer about any updates or errors to a Node in the + // tree. This will either get called from this handler constructing an + // update or from a child with an error. Capacity of one as the only update + // CDS Balancer cares about is the most recent update. + updateChannel chan clusterHandlerUpdate +} + +func newClusterHandler(parent *cdsBalancer) *clusterHandler { + return &clusterHandler{ + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + } +} + +func (ch *clusterHandler) updateRootCluster(rootClusterName string) { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + if ch.root == nil { + // Construct a root node on first update. + ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + ch.rootClusterName = rootClusterName + return + } + // Check if root cluster was changed. If it was, delete old one and start + // new one, if not do nothing. + if rootClusterName != ch.rootClusterName { + ch.root.delete() + ch.root = createClusterNode(rootClusterName, ch.parent.xdsClient, ch) + ch.rootClusterName = rootClusterName + } +} + +// This function tries to construct a cluster update to send to CDS. +func (ch *clusterHandler) constructClusterUpdate() { + if ch.root == nil { + // If root is nil, this handler is closed, ignore the update. + return + } + clusterUpdate, err := ch.root.constructClusterUpdate() + if err != nil { + // If there was an error received no op, as this simply means one of the + // children hasn't received an update yet. + return + } + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-ch.updateChannel: + default: + } + ch.updateChannel <- clusterHandlerUpdate{ + securityCfg: ch.root.clusterUpdate.SecurityCfg, + lbPolicy: ch.root.clusterUpdate.LBPolicy, + updates: clusterUpdate, + } +} + +// close() is meant to be called by CDS when the CDS balancer is closed, and it +// cancels the watches for every cluster in the cluster tree. +func (ch *clusterHandler) close() { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + if ch.root == nil { + return + } + ch.root.delete() + ch.root = nil + ch.rootClusterName = "" +} + +// This logically represents a cluster. This handles all the logic for starting +// and stopping a cluster watch, handling any updates, and constructing a list +// recursively for the ClusterHandler. +type clusterNode struct { + // A way to cancel the watch for the cluster. + cancelFunc func() + + // A list of children, as the Node can be an aggregate Cluster. + children []*clusterNode + + // A ClusterUpdate in order to build a list of cluster updates for CDS to + // send down to child XdsClusterResolverLoadBalancingPolicy. + clusterUpdate xdsclient.ClusterUpdate + + // This boolean determines whether this Node has received an update or not. + // This isn't the best practice, but this will protect a list of Cluster + // Updates from being constructed if a cluster in the tree has not received + // an update yet. + receivedUpdate bool + + clusterHandler *clusterHandler +} + +// CreateClusterNode creates a cluster node from a given clusterName. This will +// also start the watch for that cluster. 
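
constructClusterUpdate below flattens the aggregate-cluster tree depth-first, preserving child order (and hence priority). A toy equivalent; node and its string payload are local to this sketch, whereas the real code carries xdsclient.ClusterUpdate values and fails while any node still awaits its first update.

    type node struct {
        leaf     bool
        update   string
        children []*node
    }

    func flatten(n *node) []string {
        if n.leaf {
            return []string{n.update}
        }
        var out []string
        for _, c := range n.children {
            out = append(out, flatten(c)...) // child order preserves priority
        }
        return out
    }
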
+func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler) *clusterNode { + c := &clusterNode{ + clusterHandler: topLevelHandler, + } + // Communicate with the xds client here. + topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) + cancel := xdsClient.WatchCluster(clusterName, c.handleResp) + c.cancelFunc = func() { + topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) + cancel() + } + return c +} + +// This function cancels the cluster watch on the cluster and all of it's +// children. +func (c *clusterNode) delete() { + c.cancelFunc() + for _, child := range c.children { + child.delete() + } +} + +// Construct cluster update (potentially a list of ClusterUpdates) for a node. +func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error) { + // If the cluster has not yet received an update, the cluster update is not + // yet ready. + if !c.receivedUpdate { + return nil, errNotReceivedUpdate + } + + // Base case - LogicalDNS or EDS. Both of these cluster types will be tied + // to a single ClusterUpdate. + if c.clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + return []xdsclient.ClusterUpdate{c.clusterUpdate}, nil + } + + // If an aggregate construct a list by recursively calling down to all of + // it's children. + var childrenUpdates []xdsclient.ClusterUpdate + for _, child := range c.children { + childUpdateList, err := child.constructClusterUpdate() + if err != nil { + return nil, err + } + childrenUpdates = append(childrenUpdates, childUpdateList...) + } + return childrenUpdates, nil +} + +// handleResp handles a xds response for a particular cluster. This function +// also handles any logic with regards to any child state that may have changed. +// At the end of the handleResp(), the clusterUpdate will be pinged in certain +// situations to try and construct an update to send back to CDS. +func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err error) { + c.clusterHandler.clusterMutex.Lock() + defer c.clusterHandler.clusterMutex.Unlock() + if err != nil { // Write this error for run() to pick up in CDS LB policy. + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} + return + } + + c.receivedUpdate = true + c.clusterUpdate = clusterUpdate + + // If the cluster was a leaf node, if the cluster update received had change + // in the cluster update then the overall cluster update would change and + // there is a possibility for the overall update to build so ping cluster + // handler to return. Also, if there was any children from previously, + // delete the children, as the cluster type is no longer an aggregate + // cluster. + if clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate { + for _, child := range c.children { + child.delete() + } + c.children = nil + // This is an update in the one leaf node, should try to send an update + // to the parent CDS balancer. + // + // Note that this update might be a duplicate from the previous one. + // Because the update contains not only the cluster name to watch, but + // also the extra fields (e.g. security config). There's no good way to + // compare all the fields. 
+ c.clusterHandler.constructClusterUpdate() + return + } + + // Aggregate cluster handling. + newChildren := make(map[string]bool) + for _, childName := range clusterUpdate.PrioritizedClusterNames { + newChildren[childName] = true + } + + // These booleans help determine whether this callback will ping the overall + // clusterHandler to try and construct an update to send back to CDS. This + // will be determined by whether there would be a change in the overall + // clusterUpdate for the whole tree (ex. change in clusterUpdate for current + // cluster or a deleted child) and also if there's even a possibility for + // the update to build (ex. if a child is created and a watch is started, + // that child hasn't received an update yet due to the mutex lock on this + // callback). + var createdChild, deletedChild bool + + // This map will represent the current children of the cluster. It will be + // first added to in order to represent the new children. It will then have + // any children deleted that are no longer present. Then, from the cluster + // update received, will be used to construct the new child list. + mapCurrentChildren := make(map[string]*clusterNode) + for _, child := range c.children { + mapCurrentChildren[child.clusterUpdate.ClusterName] = child + } + + // Add and construct any new child nodes. + for child := range newChildren { + if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { + createdChild = true + mapCurrentChildren[child] = createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler) + } + } + + // Delete any child nodes no longer in the aggregate cluster's children. + for child := range mapCurrentChildren { + if _, stillAChild := newChildren[child]; !stillAChild { + deletedChild = true + mapCurrentChildren[child].delete() + delete(mapCurrentChildren, child) + } + } + + // The order of the children list matters, so use the clusterUpdate from + // xdsclient as the ordering, and use that logical ordering for the new + // children list. This will be a mixture of child nodes which are all + // already constructed in the mapCurrentChildrenMap. + var children = make([]*clusterNode, 0, len(clusterUpdate.PrioritizedClusterNames)) + + for _, orderedChild := range clusterUpdate.PrioritizedClusterNames { + // The cluster's already have watches started for them in xds client, so + // you can use these pointers to construct the new children list, you + // just have to put them in the correct order using the original cluster + // update. + currentChild := mapCurrentChildren[orderedChild] + children = append(children, currentChild) + } + + c.children = children + + // If the cluster is an aggregate cluster, if this callback created any new + // child cluster nodes, then there's no possibility for a full cluster + // update to successfully build, as those created children will not have + // received an update yet. However, if there was simply a child deleted, + // then there is a possibility that it will have a full cluster update to + // build and also will have a changed overall cluster update from the + // deleted child. 
+ if deletedChild && !createdChild { + c.clusterHandler.constructClusterUpdate() + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go new file mode 100644 index 0000000000..03d357b1f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -0,0 +1,543 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterimpl implements the xds_cluster_impl balancing policy. It +// handles the cluster features (e.g. circuit_breaking, RPC dropping). +// +// Note that it doesn't handle name resolution, which is done by policy +// xds_cluster_resolver. +package clusterimpl + +import ( + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/loadstore" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +const ( + // Name is the name of the cluster_impl balancer. + Name = "xds_cluster_impl_experimental" + defaultRequestCountMax = 1024 +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &clusterImplBalancer{ + ClientConn: cc, + bOpts: bOpts, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + loadWrapper: loadstore.NewWrapper(), + scWrappers: make(map[balancer.SubConn]*scWrapper), + pickerUpdateCh: buffer.NewUnbounded(), + requestCountMax: defaultRequestCountMax, + } + b.logger = prefixLogger(b) + go b.run() + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type clusterImplBalancer struct { + balancer.ClientConn + + // mu guarantees mutual exclusion between Close() and handling of picker + // update to the parent ClientConn in run(). It's to make sure that the + // run() goroutine doesn't send picker update to parent after the balancer + // is closed. + // + // It's only used by the run() goroutine, but not the other exported + // functions. Because the exported functions are guaranteed to be + // synchronized with Close(). 
+ mu sync.Mutex + closed *grpcsync.Event + done *grpcsync.Event + + bOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + xdsClient xdsclient.XDSClient + + config *LBConfig + childLB balancer.Balancer + cancelLoadReport func() + edsServiceName string + lrsServerName *string + loadWrapper *loadstore.Wrapper + + clusterNameMu sync.Mutex + clusterName string + + scWrappersMu sync.Mutex + // The SubConns passed to the child policy are wrapped in a wrapper, to keep + // locality ID. But when the parent ClientConn sends updates, it's going to + // give the original SubConn, not the wrapper. But the child policies only + // know about the wrapper, so when forwarding SubConn updates, they must be + // sent for the wrappers. + // + // This keeps a map from original SubConn to wrapper, so that when + // forwarding the SubConn state update, the child policy will get the + // wrappers. + scWrappers map[balancer.SubConn]*scWrapper + + // childState/drops/requestCounter keeps the state used by the most recently + // generated picker. All fields can only be accessed in run(). And run() is + // the only goroutine that sends picker to the parent ClientConn. All + // requests to update picker need to be sent to pickerUpdateCh. + childState balancer.State + dropCategories []DropConfig // The categories for drops. + drops []*dropper + requestCounterCluster string // The cluster name for the request counter. + requestCounterService string // The service name for the request counter. + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 + pickerUpdateCh *buffer.Unbounded +} + +// updateLoadStore checks the config for load store, and decides whether it +// needs to restart the load reporting stream. +func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { + var updateLoadClusterAndService bool + + // ClusterName is different, restart. ClusterName is from ClusterName and + // EDSServiceName. + clusterName := b.getClusterName() + if clusterName != newConfig.Cluster { + updateLoadClusterAndService = true + b.setClusterName(newConfig.Cluster) + clusterName = newConfig.Cluster + } + if b.edsServiceName != newConfig.EDSServiceName { + updateLoadClusterAndService = true + b.edsServiceName = newConfig.EDSServiceName + } + if updateLoadClusterAndService { + // This updates the clusterName and serviceName that will be reported + // for the loads. The update here is too early, the perfect timing is + // when the picker is updated with the new connection. But from this + // balancer's point of view, it's impossible to tell. + // + // On the other hand, this will almost never happen. Each LRS policy + // shouldn't get updated config. The parent should do a graceful switch + // when the clusterName or serviceName is changed. + b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) + } + + var ( + stopOldLoadReport bool + startNewLoadReport bool + ) + + // Check if it's necessary to restart load report. + if b.lrsServerName == nil { + if newConfig.LoadReportingServerName != nil { + // Old is nil, new is not nil, start new LRS. + b.lrsServerName = newConfig.LoadReportingServerName + startNewLoadReport = true + } + // Old is nil, new is nil, do nothing. + } else if newConfig.LoadReportingServerName == nil { + // Old is not nil, new is nil, stop old, don't start new. + b.lrsServerName = newConfig.LoadReportingServerName + stopOldLoadReport = true + } else { + // Old is not nil, new is not nil, compare string values, if + // different, stop old and start new. 
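+		// In summary (editor's note), the four cases handled above and below
+		// are:
+		//   old == nil, new == nil -> nothing to do
+		//   old == nil, new != nil -> start a new LRS stream
+		//   old != nil, new == nil -> stop the old LRS stream
+		//   old != nil, new != nil -> restart only if the values differ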
+ if *b.lrsServerName != *newConfig.LoadReportingServerName { + b.lrsServerName = newConfig.LoadReportingServerName + stopOldLoadReport = true + startNewLoadReport = true + } + } + + if stopOldLoadReport { + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + if !startNewLoadReport { + // If a new LRS stream will be started later, no need to update + // it to nil here. + b.loadWrapper.UpdateLoadStore(nil) + } + } + } + if startNewLoadReport { + var loadStore *load.Store + if b.xdsClient != nil { + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(*b.lrsServerName) + } + b.loadWrapper.UpdateLoadStore(loadStore) + } + + return nil +} + +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) + return nil + } + + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // Need to check for potential errors at the beginning of this function, so + // that on errors, we reject the whole config, instead of applying part of + // it. + bb := balancer.Get(newConfig.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + + // Update load reporting config. This needs to be done before updating the + // child policy because we need the loadStore from the updated client to be + // passed to the ccWrapper, so that the next picker from the child policy + // will pick up the new loadStore. + if err := b.updateLoadStore(newConfig); err != nil { + return err + } + + // If child policy is a different type, recreate the sub-balancer. + if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { + if b.childLB != nil { + b.childLB.Close() + } + b.childLB = bb.Build(b, b.bOpts) + } + b.config = newConfig + + if b.childLB == nil { + // This is not an expected situation, and should be super rare in + // practice. + // + // When this happens, we already applied all the other configurations + // (drop/circuit breaking), but there's no child policy. This balancer + // will be stuck, and we report the error to the parent. + return fmt.Errorf("child policy is nil, this means balancer %q's Build() returned nil", newConfig.ChildPolicy.Name) + } + + // Notify run() of this new config, in case drop and request counter need + // update (which means a new picker needs to be generated). + b.pickerUpdateCh.Put(newConfig) + + // Addresses and sub-balancer config are sent to sub-balancer. 
+ return b.childLB.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.config.ChildPolicy.Config, + }) +} + +func (b *clusterImplBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) + return + } + + if b.childLB != nil { + b.childLB.ResolverError(err) + } +} + +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) + return + } + + // Trigger re-resolution when a SubConn turns transient failure. This is + // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. + // + // Note that this happens not only for the addresses from DNS, but also for + // EDS (cluster_impl doesn't know if it's DNS or EDS, only the parent + // knows). The parent priority policy is configured to ignore re-resolution + // signal from the EDS children. + if s.ConnectivityState == connectivity.TransientFailure { + b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) + } + + b.scWrappersMu.Lock() + if scw, ok := b.scWrappers[sc]; ok { + sc = scw + if s.ConnectivityState == connectivity.Shutdown { + // Remove this SubConn from the map on Shutdown. + delete(b.scWrappers, scw.SubConn) + } + } + b.scWrappersMu.Unlock() + if b.childLB != nil { + b.childLB.UpdateSubConnState(sc, s) + } +} + +func (b *clusterImplBalancer) Close() { + b.mu.Lock() + b.closed.Fire() + b.mu.Unlock() + + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + <-b.done.Done() + b.logger.Infof("Shutdown") +} + +func (b *clusterImplBalancer) ExitIdle() { + if b.childLB == nil { + return + } + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + // Fallback for children that don't support ExitIdle -- connect to all + // SubConns. + for _, sc := range b.scWrappers { + sc.Connect() + } +} + +// Override methods to accept updates from the child LB. + +func (b *clusterImplBalancer) UpdateState(state balancer.State) { + // Instead of updating parent ClientConn inline, send state to run(). + b.pickerUpdateCh.Put(state) +} + +func (b *clusterImplBalancer) setClusterName(n string) { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + b.clusterName = n +} + +func (b *clusterImplBalancer) getClusterName() string { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + return b.clusterName +} + +// scWrapper is a wrapper of SubConn with locality ID. The locality ID can be +// retrieved from the addresses when creating SubConn. +// +// All SubConns passed to the child policies are wrapped in this, so that the +// picker can get the localityID from the picked SubConn, and do load reporting. +// +// After wrapping, all SubConns to and from the parent ClientConn (e.g. for +// SubConn state update, update/remove SubConn) must be the original SubConns. +// All SubConns to and from the child policy (NewSubConn, forwarding SubConn +// state update) must be the wrapper. The balancer keeps a map from the original +// SubConn to the wrapper for this purpose. +type scWrapper struct { + balancer.SubConn + // locality needs to be atomic because it can be updated while being read by + // the picker. 
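+	// Note (editor's addition): atomic.Value requires every Store to use the
+	// same concrete type; here that is always xdsinternal.LocalityID, stored
+	// via updateLocalityID below.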
+	locality atomic.Value // type xdsinternal.LocalityID
+}
+
+func (scw *scWrapper) updateLocalityID(lID xdsinternal.LocalityID) {
+	scw.locality.Store(lID)
+}
+
+func (scw *scWrapper) localityID() xdsinternal.LocalityID {
+	lID, _ := scw.locality.Load().(xdsinternal.LocalityID)
+	return lID
+}
+
+func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	clusterName := b.getClusterName()
+	newAddrs := make([]resolver.Address, len(addrs))
+	var lID xdsinternal.LocalityID
+	for i, addr := range addrs {
+		newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName)
+		lID = xdsinternal.GetLocalityID(newAddrs[i])
+	}
+	sc, err := b.ClientConn.NewSubConn(newAddrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	// Wrap this SubConn in a wrapper, and add it to the map.
+	b.scWrappersMu.Lock()
+	ret := &scWrapper{SubConn: sc}
+	ret.updateLocalityID(lID)
+	b.scWrappers[sc] = ret
+	b.scWrappersMu.Unlock()
+	return ret, nil
+}
+
+func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) {
+	scw, ok := sc.(*scWrapper)
+	if !ok {
+		b.ClientConn.RemoveSubConn(sc)
+		return
+	}
+	// Remove the original SubConn from the parent ClientConn.
+	//
+	// Note that we don't remove this SubConn from the scWrappers map. We will
+	// need it to forward the final SubConn state Shutdown to the child policy.
+	//
+	// This entry is kept in the map until its state changes to Shutdown, and
+	// will be deleted in UpdateSubConnState().
+	b.ClientConn.RemoveSubConn(scw.SubConn)
+}
+
+func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	clusterName := b.getClusterName()
+	newAddrs := make([]resolver.Address, len(addrs))
+	var lID xdsinternal.LocalityID
+	for i, addr := range addrs {
+		newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName)
+		lID = xdsinternal.GetLocalityID(newAddrs[i])
+	}
+	if scw, ok := sc.(*scWrapper); ok {
+		scw.updateLocalityID(lID)
+		// Need to get the original SubConn from the wrapper before calling
+		// parent ClientConn.
+		sc = scw.SubConn
+	}
+	b.ClientConn.UpdateAddresses(sc, newAddrs)
+}
+
+type dropConfigs struct {
+	drops           []*dropper
+	requestCounter  *xdsclient.ClusterRequestsCounter
+	requestCountMax uint32
+}
+
+// handleDropAndRequestCount compares the drop and request counter in newConfig
+// with the ones currently used by the picker. It returns a new dropConfigs if a
+// new picker needs to be generated, otherwise it returns nil.
+func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs {
+	// Compare new drop config. And update picker if it's changed.
+	var updatePicker bool
+	if !equalDropCategories(b.dropCategories, newConfig.DropCategories) {
+		b.dropCategories = newConfig.DropCategories
+		b.drops = make([]*dropper, 0, len(newConfig.DropCategories))
+		for _, c := range newConfig.DropCategories {
+			b.drops = append(b.drops, newDropper(c))
+		}
+		updatePicker = true
+	}
+
+	// Compare cluster name. And update picker if it's changed, because circuit
+	// breaking's stream counter will be different.
+	if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName {
+		b.requestCounterCluster = newConfig.Cluster
+		b.requestCounterService = newConfig.EDSServiceName
+		b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName)
+		updatePicker = true
+	}
+	// Compare upper bound of stream count. And update picker if it's changed.
+ // This is also for circuit breaking. + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + if !updatePicker { + return nil + } + return &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + } +} + +func (b *clusterImplBalancer) run() { + defer b.done.Fire() + for { + select { + case update := <-b.pickerUpdateCh.Get(): + b.pickerUpdateCh.Load() + b.mu.Lock() + if b.closed.HasFired() { + b.mu.Unlock() + return + } + switch u := update.(type) { + case balancer.State: + b.childState = u + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newPicker(b.childState, &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }, b.loadWrapper), + }) + case *LBConfig: + dc := b.handleDropAndRequestCount(u) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newPicker(b.childState, dc, b.loadWrapper), + }) + } + } + b.mu.Unlock() + case <-b.closed.Done(): + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + return + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go new file mode 100644 index 0000000000..51ff654f6e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/config.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + "encoding/json" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// DropConfig contains the category, and drop ratio. +type DropConfig struct { + Category string + RequestsPerMillion uint32 +} + +// LBConfig is the balancer config for cluster_impl balancer. 
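+//
+// An illustrative config (editor's sketch, not taken from the upstream
+// sources; field names follow the JSON tags on the struct below, drop-category
+// keys follow DropConfig's exported field names, and the childPolicy encoding
+// follows internalserviceconfig.BalancerConfig):
+//
+//	{
+//	  "cluster": "cluster_a",
+//	  "edsServiceName": "service_a",
+//	  "maxConcurrentRequests": 512,
+//	  "dropCategories": [{"Category": "throttle", "RequestsPerMillion": 100000}],
+//	  "childPolicy": [{"round_robin": {}}]
+//	}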
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +func equalDropCategories(a, b []DropConfig) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go similarity index 85% rename from vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/logging.go rename to vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go index be4d0a512d..3bbd1b0d78 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/logging.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/logging.go @@ -16,7 +16,7 @@ * */ -package edsbalancer +package clusterimpl import ( "fmt" @@ -25,10 +25,10 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[eds-lb %p] " +const prefix = "[xds-cluster-impl-lb %p] " var logger = grpclog.Component("xds") -func prefixLogger(p *edsBalancer) *internalgrpclog.PrefixLogger { +func prefixLogger(p *clusterImplBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go new file mode 100644 index 0000000000..db29c550be --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -0,0 +1,191 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterimpl + +import ( + orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/wrr" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +// NewRandomWRR is used when calculating drops. It's exported so that tests can +// override it. 
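+//
+// For example (editor's sketch), a test could make drop decisions
+// deterministic by swapping in its own constructor, where fakeWRR is a
+// hypothetical test double:
+//
+//	defer func(old func() wrr.WRR) { NewRandomWRR = old }(NewRandomWRR)
+//	NewRandomWRR = func() wrr.WRR { return &fakeWRR{} }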
+var NewRandomWRR = wrr.NewRandom + +const million = 1000000 + +type dropper struct { + category string + w wrr.WRR +} + +// greatest common divisor (GCD) via Euclidean algorithm +func gcd(a, b uint32) uint32 { + for b != 0 { + t := b + b = a % b + a = t + } + return a +} + +func newDropper(c DropConfig) *dropper { + w := NewRandomWRR() + gcdv := gcd(c.RequestsPerMillion, million) + // Return true for RequestPerMillion, false for the rest. + w.Add(true, int64(c.RequestsPerMillion/gcdv)) + w.Add(false, int64((million-c.RequestsPerMillion)/gcdv)) + + return &dropper{ + category: c.Category, + w: w, + } +} + +func (d *dropper) drop() (ret bool) { + return d.w.Next().(bool) +} + +const ( + serverLoadCPUName = "cpu_utilization" + serverLoadMemoryName = "mem_utilization" +) + +// loadReporter wraps the methods from the loadStore that are used here. +type loadReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) + CallDropped(locality string) +} + +// Picker implements RPC drop, circuit breaking drop and load reporting. +type picker struct { + drops []*dropper + s balancer.State + loadStore loadReporter + counter *xdsclient.ClusterRequestsCounter + countMax uint32 +} + +func newPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *picker { + return &picker{ + drops: config.drops, + s: s, + loadStore: loadStore, + counter: config.requestCounter, + countMax: config.requestCountMax, + } +} + +func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Don't drop unless the inner picker is READY. Similar to + // https://github.com/grpc/grpc-go/issues/2622. + if d.s.ConnectivityState != connectivity.Ready { + return d.s.Picker.Pick(info) + } + + // Check if this RPC should be dropped by category. + for _, dp := range d.drops { + if dp.drop() { + if d.loadStore != nil { + d.loadStore.CallDropped(dp.category) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") + } + } + + // Check if this RPC should be dropped by circuit breaking. + if d.counter != nil { + if err := d.counter.StartRequest(d.countMax); err != nil { + // Drops by circuit breaking are reported with empty category. They + // will be reported only in total drops, but not in per category. + if d.loadStore != nil { + d.loadStore.CallDropped("") + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) + } + } + + var lIDStr string + pr, err := d.s.Picker.Pick(info) + if scw, ok := pr.SubConn.(*scWrapper); ok { + // This OK check also covers the case err!=nil, because SubConn will be + // nil. + pr.SubConn = scw.SubConn + var e error + // If locality ID isn't found in the wrapper, an empty locality ID will + // be used. + lIDStr, e = scw.localityID().ToString() + if e != nil { + logger.Infof("failed to marshal LocalityID: %#v, loads won't be reported", scw.localityID()) + } + } + + if err != nil { + if d.counter != nil { + // Release one request count if this pick fails. 
+ d.counter.EndRequest() + } + return pr, err + } + + if d.loadStore != nil { + d.loadStore.CallStarted(lIDStr) + oldDone := pr.Done + pr.Done = func(info balancer.DoneInfo) { + if oldDone != nil { + oldDone(info) + } + d.loadStore.CallFinished(lIDStr, info.Err) + + load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) + if !ok { + return + } + d.loadStore.CallServerLoad(lIDStr, serverLoadCPUName, load.CpuUtilization) + d.loadStore.CallServerLoad(lIDStr, serverLoadMemoryName, load.MemUtilization) + for n, c := range load.RequestCost { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + for n, c := range load.Utilization { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + } + } + + if d.counter != nil { + // Update Done() so that when the RPC finishes, the request count will + // be released. + oldDone := pr.Done + pr.Done = func(doneInfo balancer.DoneInfo) { + d.counter.EndRequest() + if oldDone != nil { + oldDone(doneInfo) + } + } + } + + return pr, err +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 35eb86c359..6e0e03299f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -183,13 +183,18 @@ func (bsa *balancerStateAggregator) build() balancer.State { // handling the special connecting after ready, as in UpdateState(). Then a // function to calculate the aggregated connectivity state as in this // function. - var readyN, connectingN int + // + // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated + // state. + var readyN, connectingN, idleN int for _, ps := range bsa.idToPickerState { switch ps.stateToAggregate { case connectivity.Ready: readyN++ case connectivity.Connecting: connectingN++ + case connectivity.Idle: + idleN++ } } var aggregatedState connectivity.State @@ -198,6 +203,8 @@ func (bsa *balancerStateAggregator) build() balancer.State { aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting + case idleN > 0: + aggregatedState = connectivity.Idle default: aggregatedState = connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go index b4ae3710cd..318545d79b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/balancergroup" @@ -35,12 +36,12 @@ import ( const balancerName = "xds_cluster_manager_experimental" func init() { - balancer.Register(builder{}) + balancer.Register(bb{}) } -type builder struct{} +type bb struct{} -func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &bal{} b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, 
b.logger) @@ -51,11 +52,11 @@ func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balance return b } -func (builder) Name() string { +func (bb) Name() string { return balancerName } -func (builder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } @@ -115,7 +116,7 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - b.logger.Infof("update with config %+v, resolver state %+v", s.BalancerConfig, s.ResolverState) + b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) b.updateChildren(s, newConfig) return nil @@ -135,6 +136,10 @@ func (b *bal) Close() { b.logger.Infof("Shutdown") } +func (b *bal) ExitIdle() { + b.bg.ExitIdle() +} + const prefix = "[xds-cluster-manager-lb %p] " var logger = grpclog.Component("xds") diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go new file mode 100644 index 0000000000..66a5aab305 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -0,0 +1,378 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterresolver contains EDS balancer implementation. +package clusterresolver + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// Name is the name of the cluster_resolver balancer. +const Name = "cluster_resolver_experimental" + +var ( + errBalancerClosed = errors.New("cdsBalancer is closed") + newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { + return bb.Build(cc, o) + } +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +// Build helps implement the balancer.Builder interface. 
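+//
+// As a sketch (editor's note), gRPC resolves this builder by its registered
+// name, so the policy can be instantiated like any other balancer:
+//
+//	builder := balancer.Get(Name) // Name == "cluster_resolver_experimental"
+//	bal := builder.Build(cc, balancer.BuildOptions{})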
+func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityBuilder := balancer.Get(priority.Name) + if priorityBuilder == nil { + logger.Errorf("priority balancer is needed but not registered") + return nil + } + priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) + if !ok { + logger.Errorf("priority balancer builder is not a config parser") + return nil + } + + b := &clusterResolverBalancer{ + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + + priorityBuilder: priorityBuilder, + priorityConfigParser: priorityConfigParser, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + b.resourceWatcher = newResourceResolver(b) + b.cc = &ccWrapper{ + ClientConn: cc, + resourceWatcher: b.resourceWatcher, + } + + go b.run() + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(c), err) + } + return &cfg, nil +} + +// ccUpdate wraps a clientConn update received from gRPC (pushed from the +// xdsResolver). +type ccUpdate struct { + state balancer.ClientConnState + err error +} + +// scUpdate wraps a subConn update received from gRPC. This is directly passed +// on to the child balancer. +type scUpdate struct { + subConn balancer.SubConn + state balancer.SubConnState +} + +type exitIdle struct{} + +// clusterResolverBalancer manages xdsClient and the actual EDS balancer implementation that +// does load balancing. +// +// It currently has only an clusterResolverBalancer. Later, we may add fallback. +type clusterResolverBalancer struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. + resourceWatcher *resourceResolver + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + + priorityBuilder balancer.Builder + priorityConfigParser balancer.ConfigParser + + config *LBConfig + configRaw *serviceconfig.ParseResult + xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. + attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. + + child balancer.Balancer + priorities []priorityConfig + watchUpdateReceived bool +} + +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good +// updates lead to registration of EDS and DNS watches. Updates with error lead +// to cancellation of existing watch and propagation of the same error to the +// child balancer. +func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { + // We first handle errors, if any, and then proceed with handling the + // update, only if the status quo has changed. 
+ if err := update.err; err != nil { + b.handleErrorFromUpdate(err, true) + return + } + + b.logger.Infof("Receive update from resolver, balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + cfg, _ := update.state.BalancerConfig.(*LBConfig) + if cfg == nil { + b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", update.state.BalancerConfig) + return + } + + b.config = cfg + b.configRaw = update.state.ResolverState.ServiceConfig + b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + + if !b.watchUpdateReceived { + // If update was not received, wait for it. + return + } + // If eds resp was received before this, the child policy was created. We + // need to generate a new balancer config and send it to the child, because + // certain fields (unrelated to EDS watch) might have changed. + if err := b.updateChildConfig(); err != nil { + b.logger.Warningf("failed to update child policy config: %v", err) + } +} + +// handleWatchUpdate handles a watch update from the xDS Client. Good updates +// lead to clientConn updates being invoked on the underlying child balancer. +func (b *clusterResolverBalancer) handleWatchUpdate(update *resourceUpdate) { + if err := update.err; err != nil { + b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) + b.handleErrorFromUpdate(err, false) + return + } + + b.logger.Infof("resource update: %+v", pretty.ToJSON(update.priorities)) + b.watchUpdateReceived = true + b.priorities = update.priorities + + // A new EDS update triggers new child configs (e.g. different priorities + // for the priority balancer), and new addresses (the endpoints come from + // the EDS response). + if err := b.updateChildConfig(); err != nil { + b.logger.Warningf("failed to update child policy's balancer config: %v", err) + } +} + +// updateChildConfig builds a balancer config from eb's cached eds resp and +// service config, and sends that to the child balancer. Note that it also +// generates the addresses, because the endpoints come from the EDS resp. +// +// If child balancer doesn't already exist, one will be created. +func (b *clusterResolverBalancer) updateChildConfig() error { + // Child was build when the first EDS resp was received, so we just build + // the config and addresses. + if b.child == nil { + b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) + } + + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, b.config.XDSLBPolicy) + if err != nil { + return fmt.Errorf("failed to build priority balancer config: %v", err) + } + childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) + if err != nil { + return fmt.Errorf("failed to parse generated priority balancer config, this should never happen because the config is generated: %v", err) + } + b.logger.Infof("build balancer config: %v", pretty.ToJSON(childCfg)) + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addrs, + ServiceConfig: b.configRaw, + Attributes: b.attrsWithClient, + }, + BalancerConfig: childCfg, + }) +} + +// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS +// balancer) and the error from xds client (from the watcher). fromParent is +// true if error is from parent ClientConn. +// +// If the error is connection error, it should be handled for fallback purposes. +// +// If the error is resource-not-found: +// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS +// resources were removed. 
The EDS watch should be canceled.
+// - If it's from the xds client, it means the EDS resource was removed. The
+//   EDS watcher should keep watching.
+// In both cases, the sub-balancers will receive the error.
+func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) {
+	b.logger.Warningf("Received error: %v", err)
+	if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound {
+		// This is an error from the parent ClientConn (can be the parent CDS
+		// balancer), and is a resource-not-found error. This means the resource
+		// (can be either LDS or CDS) was removed. Stop the EDS watch.
+		b.resourceWatcher.stop()
+	}
+	if b.child != nil {
+		b.child.ResolverError(err)
+	} else {
+		// If the child balancer was never created, fail the RPCs with errors.
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            base.NewErrPicker(err),
+		})
+	}
+}
+
+// run is a long-running goroutine which handles all updates from gRPC and
+// xdsClient. All methods which are invoked directly by gRPC or xdsClient simply
+// push an update onto a channel which is read and acted upon right here.
+func (b *clusterResolverBalancer) run() {
+	for {
+		select {
+		case u := <-b.updateCh.Get():
+			b.updateCh.Load()
+			switch update := u.(type) {
+			case *ccUpdate:
+				b.handleClientConnUpdate(update)
+			case *scUpdate:
+				// SubConn updates are simply handed over to the underlying
+				// child balancer.
+				if b.child == nil {
+					b.logger.Errorf("xds: received scUpdate {%+v} with no child balancer", update)
+					break
+				}
+				b.child.UpdateSubConnState(update.subConn, update.state)
+			case exitIdle:
+				if b.child == nil {
+					b.logger.Errorf("xds: received ExitIdle with no child balancer")
+					break
+				}
+				// This implementation assumes the child balancer supports
+				// ExitIdle (but still checks for the interface's existence to
+				// avoid a panic if not). If the child does not, no subconns
+				// will be connected.
+				if ei, ok := b.child.(balancer.ExitIdler); ok {
+					ei.ExitIdle()
+				}
+			}
+		case u := <-b.resourceWatcher.updateChannel:
+			b.handleWatchUpdate(u)
+
+		// Close results in cancellation of the EDS watch and closing of the
+		// underlying child policy and is the only way to exit this goroutine.
+		case <-b.closed.Done():
+			b.resourceWatcher.stop()
+
+			if b.child != nil {
+				b.child.Close()
+				b.child = nil
+			}
+			// This is the *ONLY* point of return from this function.
+			b.logger.Infof("Shutdown")
+			b.done.Fire()
+			return
+		}
+	}
+}
+
+// Following are methods to implement the balancer interface.
+
+// UpdateClientConnState receives the serviceConfig (which contains the
+// clusterName to watch for in CDS) and the xdsClient object from the
+// xdsResolver.
+func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	if b.closed.HasFired() {
+		b.logger.Warningf("xds: received ClientConnState {%+v} after clusterResolverBalancer was closed", state)
+		return errBalancerClosed
+	}
+
+	if b.xdsClient == nil {
+		c := xdsclient.FromResolverState(state.ResolverState)
+		if c == nil {
+			return balancer.ErrBadResolverState
+		}
+		b.xdsClient = c
+		b.attrsWithClient = state.ResolverState.Attributes
+	}
+
+	b.updateCh.Put(&ccUpdate{state: state})
+	return nil
+}
+
+// ResolverError handles errors reported by the xdsResolver.
+func (b *clusterResolverBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received resolver error {%v} after clusterResolverBalancer was closed", err) + return + } + b.updateCh.Put(&ccUpdate{err: err}) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received subConn update {%v, %v} after clusterResolverBalancer was closed", sc, state) + return + } + b.updateCh.Put(&scUpdate{subConn: sc, state: state}) +} + +// Close closes the cdsBalancer and the underlying child balancer. +func (b *clusterResolverBalancer) Close() { + b.closed.Fire() + <-b.done.Done() +} + +func (b *clusterResolverBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + +// ccWrapper overrides ResolveNow(), so that re-resolution from the child +// policies will trigger the DNS resolver in cluster_resolver balancer. +type ccWrapper struct { + balancer.ClientConn + resourceWatcher *resourceResolver +} + +func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { + c.resourceWatcher.resolveNow() +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go new file mode 100644 index 0000000000..a6a3cbab80 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/config.go @@ -0,0 +1,185 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "google.golang.org/grpc/balancer/roundrobin" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" +) + +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. +func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. 
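+//
+// For example (editor's note):
+//
+//	var t DiscoveryMechanismType
+//	err := json.Unmarshal([]byte(`"LOGICAL_DNS"`), &t)
+//	// err == nil, t == DiscoveryMechanismTypeLogicalDNS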
+func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServerName is the LRS server to send load reports to. If + // not present, load reporting will be disabled. If set to the empty string, + // load reporting will be sent to the same server that we obtained CDS data + // from. + LoadReportingServerName *string `json:"lrsLoadReportingServerName,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. +func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + switch { + case dm.Cluster != b.Cluster: + return false + case !equalStringP(dm.LoadReportingServerName, b.LoadReportingServerName): + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + } + return true +} + +func equalStringP(a, b *string) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// LBConfig is the config for cluster resolver balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // DiscoveryMechanisms is an ordered list of discovery mechanisms. + // + // Must have at least one element. Results from each discovery mechanism are + // concatenated together in successive priorities. + DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` + + // XDSLBPolicy specifies the policy for locality picking and endpoint picking. + // + // Note that it's not normal balancing policy, and it can only be either + // ROUND_ROBIN or RING_HASH. + // + // For ROUND_ROBIN, the policy name will be "ROUND_ROBIN", and the config + // will be empty. This sets the locality-picking policy to weighted_target + // and the endpoint-picking policy to round_robin. 
+	//
+	// For RING_HASH, the policy name will be "RING_HASH", and the config will
+	// be the lb config for the ring_hash_experimental LB Policy. The ring_hash
+	// policy is responsible for both locality picking and endpoint picking.
+	XDSLBPolicy *internalserviceconfig.BalancerConfig `json:"xdsLbPolicy,omitempty"`
+}
+
+const (
+	rrName = roundrobin.Name
+	rhName = ringhash.Name
+)
+
+func parseConfig(c json.RawMessage) (*LBConfig, error) {
+	var cfg LBConfig
+	if err := json.Unmarshal(c, &cfg); err != nil {
+		return nil, err
+	}
+	if lbp := cfg.XDSLBPolicy; lbp != nil && !strings.EqualFold(lbp.Name, rrName) && !strings.EqualFold(lbp.Name, rhName) {
+		return nil, fmt.Errorf("unsupported child policy with name %q, not one of {%q,%q}", lbp.Name, rrName, rhName)
+	}
+	return &cfg, nil
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go
new file mode 100644
index 0000000000..475497d489
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go
@@ -0,0 +1,364 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package clusterresolver
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+
+	"google.golang.org/grpc/balancer/roundrobin"
+	"google.golang.org/grpc/balancer/weightedroundrobin"
+	"google.golang.org/grpc/internal/hierarchy"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/xds/internal"
+	"google.golang.org/grpc/xds/internal/balancer/clusterimpl"
+	"google.golang.org/grpc/xds/internal/balancer/priority"
+	"google.golang.org/grpc/xds/internal/balancer/ringhash"
+	"google.golang.org/grpc/xds/internal/balancer/weightedtarget"
+	"google.golang.org/grpc/xds/internal/xdsclient"
+)
+
+const million = 1000000
+
+// priorityConfig is the config for one priority. For example, if there are an
+// EDS mechanism and a DNS mechanism, the priority list will be
+// [priorityConfig{EDS}, priorityConfig{DNS}].
+//
+// Each priorityConfig corresponds to one discovery mechanism from the LBConfig
+// generated by the CDS balancer. The CDS balancer resolves the cluster name to
+// an ordered list of discovery mechanisms (if the top cluster is an aggregated
+// cluster), one for each underlying cluster.
+type priorityConfig struct {
+	mechanism DiscoveryMechanism
+	// edsResp is set only if type is EDS.
+	edsResp xdsclient.EndpointsUpdate
+	// addresses is set only if type is DNS.
+	addresses []string
+}
+
+// buildPriorityConfigJSON builds the balancer config for the passed-in
+// priorities.
+//
+// The diagrams below show the built tree of balancers (see the tests for the
+// output struct).
+//
+// If the xds lb policy is ROUND_ROBIN, the children will be weighted_target for
+// locality picking, and round_robin for endpoint picking.
+//
+//                      ┌────────┐
+//                      │priority│
+//                      └┬──────┬┘
+//                       │      │
+//           ┌───────────▼┐    ┌▼───────────┐
+//           │cluster_impl│    │cluster_impl│
+//           └─┬──────────┘    └──────────┬─┘
+//             │                          │
+//  ┌──────────▼─────┐             ┌─────▼──────────┐
+//  │locality_picking│             │locality_picking│
+//  └┬──────────────┬┘             └┬──────────────┬┘
+//   │              │               │              │
+// ┌─▼─┐          ┌─▼─┐           ┌─▼─┐          ┌─▼─┐
+// │LRS│          │LRS│           │LRS│          │LRS│
+// └─┬─┘          └─┬─┘           └─┬─┘          └─┬─┘
+//   │              │               │              │
+// ┌─▼──────────────┐ ┌────────────▼───┐ ┌────────▼───────┐ ┌──────────────▼─┐
+// │endpoint_picking│ │endpoint_picking│ │endpoint_picking│ │endpoint_picking│
+// └────────────────┘ └────────────────┘ └────────────────┘ └────────────────┘
+//
+// If the xds lb policy is RING_HASH, the children will be just a ring_hash
+// policy. The endpoints from all localities will be flattened into one address
+// list, and the ring_hash policy will pick endpoints from it.
+//
+//               ┌────────┐
+//               │priority│
+//               └┬──────┬┘
+//                │      │
+//     ┌──────────▼─┐  ┌─▼──────────┐
+//     │cluster_impl│  │cluster_impl│
+//     └──────┬─────┘  └─────┬──────┘
+//            │              │
+//     ┌──────▼─────┐  ┌─────▼──────┐
+//     │ ring_hash  │  │ ring_hash  │
+//     └────────────┘  └────────────┘
+//
+// If endpointPickingPolicy is nil, roundrobin will be used.
+//
+// Custom locality-picking policies aren't supported; weighted_target is always
+// used.
+func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) {
+	pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to build priority config: %v", err)
+	}
+	ret, err := json.Marshal(pc)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err)
+	}
+	return ret, addrs, nil
+}
+
+func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) {
+	var (
+		retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)}
+		retAddrs  []resolver.Address
+	)
+	for i, p := range priorities {
+		switch p.mechanism.Type {
+		case DiscoveryMechanismTypeEDS:
+			names, configs, addrs, err := buildClusterImplConfigForEDS(i, p.edsResp, p.mechanism, xdsLBPolicy)
+			if err != nil {
+				return nil, nil, err
+			}
+			retConfig.Priorities = append(retConfig.Priorities, names...)
+			for n, c := range configs {
+				retConfig.Children[n] = &priority.Child{
+					Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c},
+					// Ignore all re-resolution from EDS children.
+					IgnoreReresolutionRequests: true,
+				}
+			}
+			retAddrs = append(retAddrs, addrs...)
+		case DiscoveryMechanismTypeLogicalDNS:
+			name, config, addrs := buildClusterImplConfigForDNS(i, p.addresses)
+			retConfig.Priorities = append(retConfig.Priorities, name)
+			retConfig.Children[name] = &priority.Child{
+				Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config},
+				// Don't ignore re-resolution requests from DNS children; they
+				// will trigger DNS to re-resolve.
+				IgnoreReresolutionRequests: false,
+			}
+			retAddrs = append(retAddrs, addrs...)
+		}
+	}
+	return retConfig, retAddrs, nil
+}
+
+func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string, *clusterimpl.LBConfig, []resolver.Address) {
+	// Endpoint picking policy for DNS is hardcoded to pick_first.
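+	// For example (editor's illustrative note): with parentPriority=1 and
+	// addrStrs=["10.0.0.1:8080"], this returns the name "priority-1", a
+	// cluster_impl config whose child policy is pick_first, and one address
+	// whose hierarchy path is ["priority-1"].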
+ const childPolicy = "pick_first" + retAddrs := make([]resolver.Address, 0, len(addrStrs)) + pName := fmt.Sprintf("priority-%v", parentPriority) + for _, addrStr := range addrStrs { + retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + } + return pName, &clusterimpl.LBConfig{ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}}, retAddrs +} + +// buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for +// each priority, sorted by priority, and the addresses for each priority (with +// hierarchy attributes set). +// +// For example, if there are two priorities, the returned values will be +// - ["p0", "p1"] +// - map{"p0":p0_config, "p1":p1_config} +// - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] +// - p0 addresses' hierarchy attributes are set to p0 +func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { + drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) + for _, d := range edsResp.Drops { + drops = append(drops, clusterimpl.DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + + priorityChildNames, priorities := groupLocalitiesByPriority(edsResp.Localities) + retNames := make([]string, 0, len(priorityChildNames)) + retAddrs := make([]resolver.Address, 0, len(priorityChildNames)) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(priorityChildNames)) + for _, priorityName := range priorityChildNames { + priorityLocalities := priorities[priorityName] + // Prepend parent priority to the priority names, to avoid duplicates. + pName := fmt.Sprintf("priority-%v-%v", parentPriority, priorityName) + retNames = append(retNames, pName) + cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + if err != nil { + return nil, nil, nil, err + } + retConfigs[pName] = cfg + retAddrs = append(retAddrs, addrs...) + } + return retNames, retConfigs, retAddrs, nil +} + +// groupLocalitiesByPriority returns the localities grouped by priority. +// +// It also returns a list of strings where each string represents a priority, +// and the list is sorted from higher priority to lower priority. +// +// For example, for L0-p0, L1-p0, L2-p1, results will be +// - ["p0", "p1"] +// - map{"p0":[L0, L1], "p1":[L2]} +func groupLocalitiesByPriority(localities []xdsclient.Locality) ([]string, map[string][]xdsclient.Locality) { + var priorityIntSlice []int + priorities := make(map[string][]xdsclient.Locality) + for _, locality := range localities { + if locality.Weight == 0 { + continue + } + priorityName := fmt.Sprintf("%v", locality.Priority) + priorities[priorityName] = append(priorities[priorityName], locality) + priorityIntSlice = append(priorityIntSlice, int(locality.Priority)) + } + // Sort the priorities based on the int value, deduplicate, and then turn + // the sorted list into a string list. This will be child names, in priority + // order. 
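+	// For example (editor's note): priorities seen as [1, 0, 1] sort to
+	// [0, 1, 1], dedup to [0, 1], and become the child names ["0", "1"].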
+	sort.Ints(priorityIntSlice)
+	priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice)
+	priorityNameSlice := make([]string, 0, len(priorityIntSliceDeduped))
+	for _, p := range priorityIntSliceDeduped {
+		priorityNameSlice = append(priorityNameSlice, fmt.Sprintf("%v", p))
+	}
+	return priorityNameSlice, priorities
+}
+
+func dedupSortedIntSlice(a []int) []int {
+	if len(a) == 0 {
+		return a
+	}
+	i, j := 0, 1
+	for ; j < len(a); j++ {
+		if a[i] == a[j] {
+			continue
+		}
+		i++
+		if i != j {
+			a[i] = a[j]
+		}
+	}
+	return a[:i+1]
+}
+
+// rrBalancerConfig is a const roundrobin config, used as the child policy in
+// weighted_target configs, to avoid allocating memory every time.
+var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}
+
+// priorityLocalitiesToClusterImpl takes a list of localities (with the same
+// priority), and generates a cluster impl policy config, and a list of
+// addresses.
+func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) {
+	clusterImplCfg := &clusterimpl.LBConfig{
+		Cluster:                 mechanism.Cluster,
+		EDSServiceName:          mechanism.EDSServiceName,
+		LoadReportingServerName: mechanism.LoadReportingServerName,
+		MaxConcurrentRequests:   mechanism.MaxConcurrentRequests,
+		DropCategories:          drops,
+		// ChildPolicy is not set here. It is set below, based on xdsLBPolicy.
+	}
+
+	if xdsLBPolicy == nil || xdsLBPolicy.Name == rrName {
+		// If the lb policy is ROUND_ROBIN:
+		// - the locality-picking policy is weighted_target
+		// - the endpoint-picking policy is round_robin
+		logger.Infof("xds lb policy is %q, building config with weighted_target + round_robin", rrName)
+		// Child of weighted_target is hardcoded to round_robin.
+		wtConfig, addrs := localitiesToWeightedTarget(localities, priorityName, rrBalancerConfig)
+		clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: weightedtarget.Name, Config: wtConfig}
+		return clusterImplCfg, addrs, nil
+	}
+
+	if xdsLBPolicy.Name == rhName {
+		// If the lb policy is RING_HASH, build one ring_hash policy as the
+		// child. The endpoints from all localities will be flattened into one
+		// address list, and the ring_hash policy will pick endpoints from it.
+		logger.Infof("xds lb policy is %q, building config with ring_hash", rhName)
+		addrs := localitiesToRingHash(localities, priorityName)
+		// Set child to ring_hash; note that the ring_hash config comes from
+		// xdsLBPolicy.
+		clusterImplCfg.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: ringhash.Name, Config: xdsLBPolicy.Config}
+		return clusterImplCfg, addrs, nil
+	}
+
+	return nil, nil, fmt.Errorf("unsupported xds LB policy %q, not one of {%q,%q}", xdsLBPolicy.Name, rrName, rhName)
+}
+
+// localitiesToRingHash takes a list of localities (with the same priority), and
+// generates a list of addresses.
+//
+// The addresses have their path hierarchy set to [priority-name, locality-name]
+// (matching the hierarchy.Set call below), so the priority policy knows which
+// child the addresses are for.
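+//
+// For example (editor's illustrative note): an endpoint with weight 3 in a
+// locality with weight 2 is added to the ring with weight 6, since the weight
+// of each endpoint is locality_weight * endpoint_weight (computed below).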
+func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) []resolver.Address {
+	var addrs []resolver.Address
+	for _, locality := range localities {
+		var lw uint32 = 1
+		if locality.Weight != 0 {
+			lw = locality.Weight
+		}
+		localityStr, err := locality.ID.ToString()
+		if err != nil {
+			localityStr = fmt.Sprintf("%+v", locality.ID)
+		}
+		for _, endpoint := range locality.Endpoints {
+			// Filter out all "unhealthy" endpoints (unknown and healthy are
+			// both considered to be healthy:
+			// https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus).
+			if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown {
+				continue
+			}
+
+			var ew uint32 = 1
+			if endpoint.Weight != 0 {
+				ew = endpoint.Weight
+			}
+
+			// The weight of each endpoint is locality_weight * endpoint_weight.
+			ai := weightedroundrobin.AddrInfo{Weight: lw * ew}
+			addr := weightedroundrobin.SetAddrInfo(resolver.Address{Addr: endpoint.Address}, ai)
+			addr = hierarchy.Set(addr, []string{priorityName, localityStr})
+			addr = internal.SetLocalityID(addr, locality.ID)
+			addrs = append(addrs, addr)
+		}
+	}
+	return addrs
+}
+
+// localitiesToWeightedTarget takes a list of localities (with the same
+// priority), and generates a weighted target config, and a list of addresses.
+//
+// The addresses have path hierarchy set to [priority-name, locality-name], so
+// priority and weighted target know which child policy they are for.
+func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) {
+	weightedTargets := make(map[string]weightedtarget.Target)
+	var addrs []resolver.Address
+	for _, locality := range localities {
+		localityStr, err := locality.ID.ToString()
+		if err != nil {
+			localityStr = fmt.Sprintf("%+v", locality.ID)
+		}
+		weightedTargets[localityStr] = weightedtarget.Target{Weight: locality.Weight, ChildPolicy: childPolicy}
+		for _, endpoint := range locality.Endpoints {
+			// Filter out all "unhealthy" endpoints (unknown and healthy are
+			// both considered to be healthy:
+			// https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus).
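+			// Any other status (UNHEALTHY, DRAINING, TIMEOUT, DEGRADED) fails
+			// the check below, and the endpoint is skipped.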
+			if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown {
+				continue
+			}
+
+			addr := resolver.Address{Addr: endpoint.Address}
+			if childPolicy.Name == weightedroundrobin.Name && endpoint.Weight != 0 {
+				ai := weightedroundrobin.AddrInfo{Weight: endpoint.Weight}
+				addr = weightedroundrobin.SetAddrInfo(addr, ai)
+			}
+			addr = hierarchy.Set(addr, []string{priorityName, localityStr})
+			addr = internal.SetLocalityID(addr, locality.ID)
+			addrs = append(addrs, addr)
+		}
+	}
+	return &weightedtarget.LBConfig{Targets: weightedTargets}, addrs
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go
rename to vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go
index af6f577197..728f1f709c 100644
--- a/vendor/google.golang.org/grpc/internal/credentials/spiffe_appengine.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/logging.go
@@ -1,5 +1,3 @@
-// +build appengine
-
 /*
  *
  * Copyright 2020 gRPC authors.
@@ -18,14 +16,19 @@
  *
  */
 
-package credentials
+package clusterresolver
 
 import (
-	"crypto/tls"
-	"net/url"
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
 
-// SPIFFEIDFromState is a no-op for appengine builds.
-func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
-	return nil
+const prefix = "[xds-cluster-resolver-lb %p] "
+
+var logger = grpclog.Component("xds")
+
+func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
 }
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
new file mode 100644
index 0000000000..2125bd2326
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package clusterresolver
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/xds/internal/xdsclient"
+)
+
+// resourceUpdate is a combined update from all the resources, in the order of
+// priority. For example, it can be {EDS, EDS, DNS}.
+type resourceUpdate struct {
+	priorities []priorityConfig
+	err        error
+}
+
+type discoveryMechanism interface {
+	lastUpdate() (interface{}, bool)
+	resolveNow()
+	stop()
+}
+
+// discoveryMechanismKey is {type+resource_name}. It's used as the map key, so
+// that the same resource resolver can be reused (e.g. when there are two
+// mechanisms, both for the same EDS resource, but with different circuit
+// breaking configs).
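+//
+// For example, two EDS mechanisms that both watch the resource "cluster-a"
+// (an illustrative name) share the key {EDS, "cluster-a"}, and hence a single
+// endpoint watch, even if their circuit breaking configs differ.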
+type discoveryMechanismKey struct {
+	typ  DiscoveryMechanismType
+	name string
+}
+
+// resolverMechanismTuple is needed to keep the resolver and the discovery
+// mechanism together, because resolvers can be shared, and we need the
+// mechanism for fields like circuit breaking and LRS when generating the
+// balancer config.
+type resolverMechanismTuple struct {
+	dm    DiscoveryMechanism
+	dmKey discoveryMechanismKey
+	r     discoveryMechanism
+}
+
+type resourceResolver struct {
+	parent        *clusterResolverBalancer
+	updateChannel chan *resourceUpdate
+
+	// mu protects the slice and map, and the content of the resolvers in the
+	// slice.
+	mu          sync.Mutex
+	mechanisms  []DiscoveryMechanism
+	children    []resolverMechanismTuple
+	childrenMap map[discoveryMechanismKey]discoveryMechanism
+}
+
+func newResourceResolver(parent *clusterResolverBalancer) *resourceResolver {
+	return &resourceResolver{
+		parent:        parent,
+		updateChannel: make(chan *resourceUpdate, 1),
+		childrenMap:   make(map[discoveryMechanismKey]discoveryMechanism),
+	}
+}
+
+func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, aa := range a {
+		bb := b[i]
+		if !aa.Equal(bb) {
+			return false
+		}
+	}
+	return true
+}
+
+func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) {
+		return
+	}
+	rr.mechanisms = mechanisms
+	rr.children = make([]resolverMechanismTuple, len(mechanisms))
+	newDMs := make(map[discoveryMechanismKey]bool)
+
+	// Start one watch for each new discovery mechanism {type+resource_name}.
+	for i, dm := range mechanisms {
+		switch dm.Type {
+		case DiscoveryMechanismTypeEDS:
+			// If EDSServiceName is not set, use the cluster name as the EDS
+			// service name to watch.
+			nameToWatch := dm.EDSServiceName
+			if nameToWatch == "" {
+				nameToWatch = dm.Cluster
+			}
+			dmKey := discoveryMechanismKey{typ: dm.Type, name: nameToWatch}
+			newDMs[dmKey] = true
+
+			r := rr.childrenMap[dmKey]
+			if r == nil {
+				r = newEDSResolver(nameToWatch, rr.parent.xdsClient, rr)
+				rr.childrenMap[dmKey] = r
+			}
+			rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r}
+		case DiscoveryMechanismTypeLogicalDNS:
+			// The name to resolve in DNS is the hostname, not the ClientConn
+			// target.
+			dmKey := discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname}
+			newDMs[dmKey] = true
+
+			r := rr.childrenMap[dmKey]
+			if r == nil {
+				r = newDNSResolver(dm.DNSHostname, rr)
+				rr.childrenMap[dmKey] = r
+			}
+			rr.children[i] = resolverMechanismTuple{dm: dm, dmKey: dmKey, r: r}
+		}
+	}
+	// Stop the resolvers that were removed.
+	for dm, r := range rr.childrenMap {
+		if !newDMs[dm] {
+			delete(rr.childrenMap, dm)
+			r.stop()
+		}
+	}
+	// Regenerate even if there's no change in the set of resolvers, in case
+	// the priority order changed.
+	rr.generate()
+}
+
+// resolveNow is typically called to trigger a re-resolve of DNS. The EDS
+// resolveNow() is a no-op.
+func (rr *resourceResolver) resolveNow() {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	for _, r := range rr.childrenMap {
+		r.resolveNow()
+	}
+}
+
+func (rr *resourceResolver) stop() {
+	rr.mu.Lock()
+	defer rr.mu.Unlock()
+	for dm, r := range rr.childrenMap {
+		delete(rr.childrenMap, dm)
+		r.stop()
+	}
+	rr.mechanisms = nil
+	rr.children = nil
+}
+
+// generate collects all the updates from all the resolvers, and pushes the
+// combined result into the update channel.
+// It only pushes the update when all the child resolvers have received at
+// least one update; otherwise it waits.
+//
+// Caller must hold rr.mu.
+func (rr *resourceResolver) generate() {
+	var ret []priorityConfig
+	for _, rDM := range rr.children {
+		r, ok := rr.childrenMap[rDM.dmKey]
+		if !ok {
+			rr.parent.logger.Infof("resolver for %+v not found, should never happen", rDM.dmKey)
+			continue
+		}
+
+		u, ok := r.lastUpdate()
+		if !ok {
+			// Don't send updates to the parent until all resolvers have an
+			// update to send.
+			return
+		}
+		switch uu := u.(type) {
+		case xdsclient.EndpointsUpdate:
+			ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu})
+		case []string:
+			ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu})
+		}
+	}
+	select {
+	case <-rr.updateChannel:
+	default:
+	}
+	rr.updateChannel <- &resourceUpdate{priorities: ret}
+}
+
+type edsDiscoveryMechanism struct {
+	cancel func()
+
+	update         xdsclient.EndpointsUpdate
+	updateReceived bool
+}
+
+func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) {
+	if !er.updateReceived {
+		return nil, false
+	}
+	return er.update, true
+}
+
+func (er *edsDiscoveryMechanism) resolveNow() {
+}
+
+func (er *edsDiscoveryMechanism) stop() {
+	er.cancel()
+}
+
+// newEDSResolver starts the EDS watch on the given xds client.
+func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism {
+	ret := &edsDiscoveryMechanism{}
+	topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch)
+	cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsclient.EndpointsUpdate, err error) {
+		topLevelResolver.mu.Lock()
+		defer topLevelResolver.mu.Unlock()
+		if err != nil {
+			select {
+			case <-topLevelResolver.updateChannel:
+			default:
+			}
+			topLevelResolver.updateChannel <- &resourceUpdate{err: err}
+			return
+		}
+		ret.update = update
+		ret.updateReceived = true
+		topLevelResolver.generate()
+	})
+	ret.cancel = func() {
+		topLevelResolver.parent.logger.Infof("EDS watch canceled on %v", nameToWatch)
+		cancel()
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
new file mode 100644
index 0000000000..7a639f51a5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package clusterresolver
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+		// The dns resolver is registered by the grpc package. So, this call to
+		// resolver.Get() is never expected to return nil.
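+		// The built resolver reports its results via the cc passed in here;
+		// in this package that is the dnsDiscoveryMechanism defined below,
+		// through its UpdateState/ReportError methods.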
+ return resolver.Get("dns").Build(target, cc, opts) + } +) + +// dnsDiscoveryMechanism watches updates for the given DNS hostname. +// +// It implements resolver.ClientConn interface to work with the DNS resolver. +type dnsDiscoveryMechanism struct { + target string + topLevelResolver *resourceResolver + r resolver.Resolver + + addrs []string + updateReceived bool +} + +func newDNSResolver(target string, topLevelResolver *resourceResolver) *dnsDiscoveryMechanism { + ret := &dnsDiscoveryMechanism{ + target: target, + topLevelResolver: topLevelResolver, + } + r, err := newDNS(resolver.Target{Scheme: "dns", Endpoint: target}, ret, resolver.BuildOptions{}) + if err != nil { + select { + case <-topLevelResolver.updateChannel: + default: + } + topLevelResolver.updateChannel <- &resourceUpdate{err: err} + } + ret.r = r + return ret +} + +func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + if !dr.updateReceived { + return nil, false + } + return dr.addrs, true +} + +func (dr *dnsDiscoveryMechanism) resolveNow() { + dr.r.ResolveNow(resolver.ResolveNowOptions{}) +} + +func (dr *dnsDiscoveryMechanism) stop() { + dr.r.Close() +} + +// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive +// updates from the real DNS resolver. + +func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + dr.topLevelResolver.mu.Lock() + defer dr.topLevelResolver.mu.Unlock() + addrs := make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } + dr.addrs = addrs + dr.updateReceived = true + dr.topLevelResolver.generate() + return nil +} + +func (dr *dnsDiscoveryMechanism) ReportError(err error) { + select { + case <-dr.topLevelResolver.updateChannel: + default: + } + dr.topLevelResolver.updateChannel <- &resourceUpdate{err: err} +} + +func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { + dr.UpdateState(resolver.State{Addresses: addresses}) +} + +func (dr *dnsDiscoveryMechanism) NewServiceConfig(string) { + // This method is deprecated, and service config isn't supported. +} + +func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { + return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/config.go deleted file mode 100644 index 11c1338c81..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/config.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "encoding/json" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/serviceconfig" -) - -// EDSConfig represents the loadBalancingConfig section of the service config -// for EDS balancers. 
-type EDSConfig struct { - serviceconfig.LoadBalancingConfig - // ChildPolicy represents the load balancing config for the child - // policy. - ChildPolicy *loadBalancingConfig - // FallBackPolicy represents the load balancing config for the - // fallback. - FallBackPolicy *loadBalancingConfig - // Name to use in EDS query. If not present, defaults to the server - // name from the target URI. - EDSServiceName string - // MaxConcurrentRequests is the max number of concurrent request allowed for - // this service. If unset, default value 1024 is used. - // - // Note that this is not defined in the service config proto. And the reason - // is, we are dropping EDS and moving the features into cluster_impl. But in - // the mean time, to keep things working, we need to add this field. And it - // should be fine to add this extra field here, because EDS is only used in - // CDS today, so we have full control. - MaxConcurrentRequests *uint32 - // LRS server to send load reports to. If not present, load reporting - // will be disabled. If set to the empty string, load reporting will - // be sent to the same server that we obtained CDS data from. - LrsLoadReportingServerName *string -} - -// edsConfigJSON is the intermediate unmarshal result of EDSConfig. ChildPolicy -// and Fallbackspolicy are post-processed, and for each, the first installed -// policy is kept. -type edsConfigJSON struct { - ChildPolicy []*loadBalancingConfig - FallbackPolicy []*loadBalancingConfig - EDSServiceName string - MaxConcurrentRequests *uint32 - LRSLoadReportingServerName *string -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -// When unmarshalling, we iterate through the childPolicy/fallbackPolicy lists -// and select the first LB policy which has been registered. -func (l *EDSConfig) UnmarshalJSON(data []byte) error { - var configJSON edsConfigJSON - if err := json.Unmarshal(data, &configJSON); err != nil { - return err - } - - l.EDSServiceName = configJSON.EDSServiceName - l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests - l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName - - for _, lbcfg := range configJSON.ChildPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.ChildPolicy = lbcfg - break - } - } - - for _, lbcfg := range configJSON.FallbackPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.FallBackPolicy = lbcfg - break - } - } - return nil -} - -// MarshalJSON returns a JSON encoding of l. -func (l *EDSConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("EDSConfig.MarshalJSON() is unimplemented") -} - -// loadBalancingConfig represents a single load balancing config, -// stored in JSON format. -type loadBalancingConfig struct { - Name string - Config json.RawMessage -} - -// MarshalJSON returns a JSON encoding of l. -func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("loadBalancingConfig.MarshalJSON() is unimplemented") -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. 
-func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { - var cfg map[string]json.RawMessage - if err := json.Unmarshal(data, &cfg); err != nil { - return err - } - for name, config := range cfg { - l.Name = name - l.Config = config - } - return nil -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds.go deleted file mode 100644 index d1a226e989..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds.go +++ /dev/null @@ -1,400 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package edsbalancer contains EDS balancer implementation. -package edsbalancer - -import ( - "encoding/json" - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/serviceconfig" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" -) - -const edsName = "eds_experimental" - -// xdsClientInterface contains only the xds_client methods needed by EDS -// balancer. It's defined so we can override xdsclient.New function in tests. -type xdsClientInterface interface { - WatchEndpoints(clusterName string, edsCb func(xdsclient.EndpointsUpdate, error)) (cancel func()) - ReportLoad(server string) (loadStore *load.Store, cancel func()) - Close() -} - -var ( - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { - return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) - } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } -) - -func init() { - balancer.Register(&edsBalancerBuilder{}) -} - -type edsBalancerBuilder struct{} - -// Build helps implement the balancer.Builder interface. 
-func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - x := &edsBalancer{ - cc: cc, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - grpcUpdate: make(chan interface{}), - xdsClientUpdate: make(chan *edsUpdate), - childPolicyUpdate: buffer.NewUnbounded(), - lsw: &loadStoreWrapper{}, - config: &EDSConfig{}, - } - x.logger = prefixLogger(x) - - client, err := newXDSClient() - if err != nil { - x.logger.Errorf("xds: failed to create xds-client: %v", err) - return nil - } - - x.xdsClient = client - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.lsw, x.logger) - x.logger.Infof("Created") - go x.run() - return x -} - -func (b *edsBalancerBuilder) Name() string { - return edsName -} - -func (b *edsBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg EDSConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into EDSConfig, error: %v", string(c), err) - } - return &cfg, nil -} - -// edsBalancerImplInterface defines the interface that edsBalancerImpl must -// implement to communicate with edsBalancer. -// -// It's implemented by the real eds balancer and a fake testing eds balancer. -type edsBalancerImplInterface interface { - // handleEDSResponse passes the received EDS message from traffic director - // to eds balancer. - handleEDSResponse(edsResp xdsclient.EndpointsUpdate) - // handleChildPolicy updates the eds balancer the intra-cluster load - // balancing policy to use. - handleChildPolicy(name string, config json.RawMessage) - // handleSubConnStateChange handles state change for SubConn. - handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) - // updateState handle a balancer state update from the priority. - updateState(priority priorityType, s balancer.State) - // updateServiceRequestsConfig updates the service requests counter to the - // one for the given service name. - updateServiceRequestsConfig(serviceName string, max *uint32) - // updateClusterName updates the cluster name that will be attached to the - // address attributes. - updateClusterName(name string) - // close closes the eds balancer. - close() -} - -// edsBalancer manages xdsClient and the actual EDS balancer implementation that -// does load balancing. -// -// It currently has only an edsBalancer. Later, we may add fallback. -type edsBalancer struct { - cc balancer.ClientConn - closed *grpcsync.Event - done *grpcsync.Event - logger *grpclog.PrefixLogger - - // edsBalancer continuously monitors the channels below, and will handle - // events from them in sync. - grpcUpdate chan interface{} - xdsClientUpdate chan *edsUpdate - childPolicyUpdate *buffer.Unbounded - - xdsClient xdsClientInterface - lsw *loadStoreWrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface - - // edsServiceName is the edsServiceName currently being watched, not - // necessary the edsServiceName from service config. - edsServiceName string - cancelEndpointsWatch func() - loadReportServer *string // LRS is disabled if loadReporterServer is nil. - cancelLoadReport func() -} - -// run gets executed in a goroutine once edsBalancer is created. It monitors -// updates from grpc, xdsClient and load balancer. It synchronizes the -// operations that happen inside edsBalancer. It exits when edsBalancer is -// closed. 
-func (x *edsBalancer) run() { - for { - select { - case update := <-x.grpcUpdate: - x.handleGRPCUpdate(update) - case update := <-x.xdsClientUpdate: - x.handleXDSClientUpdate(update) - case update := <-x.childPolicyUpdate.Get(): - x.childPolicyUpdate.Load() - u := update.(*balancerStateWithPriority) - x.edsImpl.updateState(u.priority, u.s) - case <-x.closed.Done(): - x.cancelWatch() - x.xdsClient.Close() - x.edsImpl.close() - x.logger.Infof("Shutdown") - x.done.Fire() - return - } - } -} - -// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS -// balancer) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it should be handled for fallback purposes. -// -// If the error is resource-not-found: -// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS -// resources were removed. The EDS watch should be canceled. -// - If it's from xds client, it means EDS resource were removed. The EDS -// watcher should keep watching. -// In both cases, the sub-balancers will be closed, and the future picks will -// fail. -func (x *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - x.logger.Warningf("Received error: %v", err) - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - if fromParent { - // This is an error from the parent ClientConn (can be the parent - // CDS balancer), and is a resource-not-found error. This means the - // resource (can be either LDS or CDS) was removed. Stop the EDS - // watch. - x.cancelWatch() - } - x.edsImpl.handleEDSResponse(xdsclient.EndpointsUpdate{}) - } -} - -func (x *edsBalancer) handleGRPCUpdate(update interface{}) { - switch u := update.(type) { - case *subConnStateUpdate: - x.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) - case *balancer.ClientConnState: - x.logger.Infof("Receive update from resolver, balancer config: %+v", u.BalancerConfig) - cfg, _ := u.BalancerConfig.(*EDSConfig) - if cfg == nil { - // service config parsing failed. should never happen. - return - } - - if err := x.handleServiceConfigUpdate(cfg); err != nil { - x.logger.Warningf("failed to update xDS client: %v", err) - } - - x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests) - - // We will update the edsImpl with the new child policy, if we got a - // different one. - if !cmp.Equal(cfg.ChildPolicy, x.config.ChildPolicy, cmpopts.EquateEmpty()) { - if cfg.ChildPolicy != nil { - x.edsImpl.handleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) - } else { - x.edsImpl.handleChildPolicy(roundrobin.Name, nil) - } - } - x.config = cfg - case error: - x.handleErrorFromUpdate(u, true) - default: - // unreachable path - x.logger.Errorf("wrong update type: %T", update) - } -} - -// handleServiceConfigUpdate applies the service config update, watching a new -// EDS service name and restarting LRS stream, as required. -func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - // Restart EDS watch when the edsServiceName has changed. - if x.edsServiceName != config.EDSServiceName { - x.edsServiceName = config.EDSServiceName - x.startEndpointsWatch() - // TODO: this update for the LRS service name is too early. It should - // only apply to the new EDS response. But this is applied to the RPCs - // before the new EDS response. To fully fix this, the EDS balancer - // needs to do a graceful switch to another EDS implementation. 
- // - // This is OK for now, because we don't actually expect edsServiceName - // to change. Fix this (a bigger change) will happen later. - x.lsw.updateServiceName(x.edsServiceName) - x.edsImpl.updateClusterName(x.edsServiceName) - } - - // Restart load reporting when the loadReportServer name has changed. - if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { - loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.lsw.updateLoadStore(loadStore) - } - - return nil -} - -// startEndpointsWatch starts the EDS watch. -// -// This usually means load report needs to be restarted, but this function does -// NOT do that. Caller needs to call startLoadReport separately. -func (x *edsBalancer) startEndpointsWatch() { - if x.cancelEndpointsWatch != nil { - x.cancelEndpointsWatch() - } - cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { - x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, update) - x.handleEDSUpdate(update, err) - }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) - x.cancelEndpointsWatch = func() { - cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) - } -} - -func (x *edsBalancer) cancelWatch() { - x.loadReportServer = nil - if x.cancelLoadReport != nil { - x.cancelLoadReport() - } - x.edsServiceName = "" - if x.cancelEndpointsWatch != nil { - x.cancelEndpointsWatch() - } -} - -// startLoadReport starts load reporting. If there's already a load reporting in -// progress, it cancels that. -// -// Caller can cal this when the loadReportServer name changes, but -// edsServiceName doesn't (so we only need to restart load reporting, not EDS -// watch). 
-func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { - x.loadReportServer = loadReportServer - if x.cancelLoadReport != nil { - x.cancelLoadReport() - } - if loadReportServer == nil { - return nil - } - ls, cancel := x.xdsClient.ReportLoad(*loadReportServer) - x.cancelLoadReport = cancel - return ls -} - -func (x *edsBalancer) handleXDSClientUpdate(update *edsUpdate) { - if err := update.err; err != nil { - x.handleErrorFromUpdate(err, false) - return - } - x.edsImpl.handleEDSResponse(update.resp) -} - -type subConnStateUpdate struct { - sc balancer.SubConn - state balancer.SubConnState -} - -func (x *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - update := &subConnStateUpdate{ - sc: sc, - state: state, - } - select { - case x.grpcUpdate <- update: - case <-x.closed.Done(): - } -} - -func (x *edsBalancer) ResolverError(err error) { - select { - case x.grpcUpdate <- err: - case <-x.closed.Done(): - } -} - -func (x *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - select { - case x.grpcUpdate <- &s: - case <-x.closed.Done(): - } - return nil -} - -type edsUpdate struct { - resp xdsclient.EndpointsUpdate - err error -} - -func (x *edsBalancer) handleEDSUpdate(resp xdsclient.EndpointsUpdate, err error) { - select { - case x.xdsClientUpdate <- &edsUpdate{resp: resp, err: err}: - case <-x.closed.Done(): - } -} - -type balancerStateWithPriority struct { - priority priorityType - s balancer.State -} - -func (x *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { - x.childPolicyUpdate.Put(&balancerStateWithPriority{ - priority: p, - s: s, - }) -} - -func (x *edsBalancer) Close() { - x.closed.Fire() - <-x.done.Done() -} - -// equalStringPointers returns true if -// - a and b are both nil OR -// - *a == *b (and a and b are both non-nil) -func equalStringPointers(a, b *string) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl.go deleted file mode 100644 index 63b75caae8..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl.go +++ /dev/null @@ -1,601 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package edsbalancer - -import ( - "encoding/json" - "reflect" - "sync" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" - xdsi "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" -) - -// TODO: make this a environment variable? -var defaultPriorityInitTimeout = 10 * time.Second - -const defaultServiceRequestCountMax = 1024 - -type localityConfig struct { - weight uint32 - addrs []resolver.Address -} - -// balancerGroupWithConfig contains the localities with the same priority. It -// manages all localities using a balancerGroup. -type balancerGroupWithConfig struct { - bg *balancergroup.BalancerGroup - stateAggregator *weightedaggregator.Aggregator - configs map[xdsi.LocalityID]*localityConfig -} - -// edsBalancerImpl does load balancing based on the EDS responses. Note that it -// doesn't implement the balancer interface. It's intended to be used by a high -// level balancer implementation. -// -// The localities are picked as weighted round robin. A configurable child -// policy is used to manage endpoints in each locality. -type edsBalancerImpl struct { - cc balancer.ClientConn - buildOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - loadReporter load.PerClusterReporter - - enqueueChildBalancerStateUpdate func(priorityType, balancer.State) - - subBalancerBuilder balancer.Builder - priorityToLocalities map[priorityType]*balancerGroupWithConfig - respReceived bool - - // There's no need to hold any mutexes at the same time. The order to take - // mutex should be: priorityMu > subConnMu, but this is implicit via - // balancers (starting balancer with next priority while holding priorityMu, - // and the balancer may create new SubConn). - - priorityMu sync.Mutex - // priorities are pointers, and will be nil when EDS returns empty result. - priorityInUse priorityType - priorityLowest priorityType - priorityToState map[priorityType]*balancer.State - // The timer to give a priority 10 seconds to connect. And if the priority - // doesn't go into Ready/Failure, start the next priority. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *time.Timer - - subConnMu sync.Mutex - subConnToPriority map[balancer.SubConn]priorityType - - pickerMu sync.Mutex - dropConfig []xdsclient.OverloadDropConfig - drops []*dropper - innerState balancer.State // The state of the picker without drop support. - serviceRequestsCounter *client.ServiceRequestsCounter - serviceRequestCountMax uint32 - - clusterNameMu sync.Mutex - clusterName string -} - -// newEDSBalancerImpl create a new edsBalancerImpl. 
-func newEDSBalancerImpl(cc balancer.ClientConn, bOpts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lr load.PerClusterReporter, logger *grpclog.PrefixLogger) *edsBalancerImpl { - edsImpl := &edsBalancerImpl{ - cc: cc, - buildOpts: bOpts, - logger: logger, - subBalancerBuilder: balancer.Get(roundrobin.Name), - loadReporter: lr, - - enqueueChildBalancerStateUpdate: enqueueState, - - priorityToLocalities: make(map[priorityType]*balancerGroupWithConfig), - priorityToState: make(map[priorityType]*balancer.State), - subConnToPriority: make(map[balancer.SubConn]priorityType), - serviceRequestCountMax: defaultServiceRequestCountMax, - } - // Don't start balancer group here. Start it when handling the first EDS - // response. Otherwise the balancer group will be started with round-robin, - // and if users specify a different sub-balancer, all balancers in balancer - // group will be closed and recreated when sub-balancer update happens. - return edsImpl -} - -// handleChildPolicy updates the child balancers handling endpoints. Child -// policy is roundrobin by default. If the specified balancer is not installed, -// the old child balancer will be used. -// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. -func (edsImpl *edsBalancerImpl) handleChildPolicy(name string, config json.RawMessage) { - if edsImpl.subBalancerBuilder.Name() == name { - return - } - newSubBalancerBuilder := balancer.Get(name) - if newSubBalancerBuilder == nil { - edsImpl.logger.Infof("edsBalancerImpl: failed to find balancer with name %q, keep using %q", name, edsImpl.subBalancerBuilder.Name()) - return - } - edsImpl.subBalancerBuilder = newSubBalancerBuilder - for _, bgwc := range edsImpl.priorityToLocalities { - if bgwc == nil { - continue - } - for lid, config := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - // TODO: (eds) add support to balancer group to support smoothly - // switching sub-balancers (keep old balancer around until new - // balancer becomes ready). - bgwc.bg.Remove(lidJSON) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: config.addrs}, - }) - // This doesn't need to manually update picker, because the new - // sub-balancer will send it's picker later. - } - } -} - -// updateDrops compares new drop policies with the old. If they are different, -// it updates the drop policies and send ClientConn an updated picker. -func (edsImpl *edsBalancerImpl) updateDrops(dropConfig []xdsclient.OverloadDropConfig) { - if cmp.Equal(dropConfig, edsImpl.dropConfig) { - return - } - edsImpl.pickerMu.Lock() - edsImpl.dropConfig = dropConfig - var newDrops []*dropper - for _, c := range edsImpl.dropConfig { - newDrops = append(newDrops, newDropper(c)) - } - edsImpl.drops = newDrops - if edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new drops. - edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -// handleEDSResponse handles the EDS response and creates/deletes localities and -// SubConns. It also handles drops. 
-// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. -func (edsImpl *edsBalancerImpl) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) { - // TODO: Unhandled fields from EDS response: - // - edsResp.GetPolicy().GetOverprovisioningFactor() - // - locality.GetPriority() - // - lbEndpoint.GetMetadata(): contains BNS name, send to sub-balancers - // - as service config or as resolved address - // - if socketAddress is not ip:port - // - socketAddress.GetNamedPort(), socketAddress.GetResolverName() - // - resolve endpoint's name with another resolver - - // If the first EDS update is an empty update, nothing is changing from the - // previous update (which is the default empty value). We need to explicitly - // handle first update being empty, and send a transient failure picker. - // - // TODO: define Equal() on type EndpointUpdate to avoid DeepEqual. And do - // the same for the other types. - if !edsImpl.respReceived && reflect.DeepEqual(edsResp, xdsclient.EndpointsUpdate{}) { - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - } - edsImpl.respReceived = true - - edsImpl.updateDrops(edsResp.Drops) - - // Filter out all localities with weight 0. - // - // Locality weighted load balancer can be enabled by setting an option in - // CDS, and the weight of each locality. Currently, without the guarantee - // that CDS is always sent, we assume locality weighted load balance is - // always enabled, and ignore all weight 0 localities. - // - // In the future, we should look at the config in CDS response and decide - // whether locality weight matters. - newLocalitiesWithPriority := make(map[priorityType][]xdsclient.Locality) - for _, locality := range edsResp.Localities { - if locality.Weight == 0 { - continue - } - priority := newPriorityType(locality.Priority) - newLocalitiesWithPriority[priority] = append(newLocalitiesWithPriority[priority], locality) - } - - var ( - priorityLowest priorityType - priorityChanged bool - ) - - for priority, newLocalities := range newLocalitiesWithPriority { - if !priorityLowest.isSet() || priorityLowest.higherThan(priority) { - priorityLowest = priority - } - - bgwc, ok := edsImpl.priorityToLocalities[priority] - if !ok { - // Create balancer group if it's never created (this is the first - // time this priority is received). We don't start it here. It may - // be started when necessary (e.g. when higher is down, or if it's a - // new lowest priority). - ccPriorityWrapper := edsImpl.ccWrapperWithPriority(priority) - stateAggregator := weightedaggregator.New(ccPriorityWrapper, edsImpl.logger, newRandomWRR) - bgwc = &balancerGroupWithConfig{ - bg: balancergroup.New(ccPriorityWrapper, edsImpl.buildOpts, stateAggregator, edsImpl.loadReporter, edsImpl.logger), - stateAggregator: stateAggregator, - configs: make(map[xdsi.LocalityID]*localityConfig), - } - edsImpl.priorityToLocalities[priority] = bgwc - priorityChanged = true - edsImpl.logger.Infof("New priority %v added", priority) - } - edsImpl.handleEDSResponsePerPriority(bgwc, newLocalities) - } - edsImpl.priorityLowest = priorityLowest - - // Delete priorities that are removed in the latest response, and also close - // the balancer group. 
- for p, bgwc := range edsImpl.priorityToLocalities { - if _, ok := newLocalitiesWithPriority[p]; !ok { - delete(edsImpl.priorityToLocalities, p) - bgwc.bg.Close() - delete(edsImpl.priorityToState, p) - priorityChanged = true - edsImpl.logger.Infof("Priority %v deleted", p) - } - } - - // If priority was added/removed, it may affect the balancer group to use. - // E.g. priorityInUse was removed, or all priorities are down, and a new - // lower priority was added. - if priorityChanged { - edsImpl.handlePriorityChange() - } -} - -func (edsImpl *edsBalancerImpl) handleEDSResponsePerPriority(bgwc *balancerGroupWithConfig, newLocalities []xdsclient.Locality) { - // newLocalitiesSet contains all names of localities in the new EDS response - // for the same priority. It's used to delete localities that are removed in - // the new EDS response. - newLocalitiesSet := make(map[xdsi.LocalityID]struct{}) - var rebuildStateAndPicker bool - for _, locality := range newLocalities { - // One balancer for each locality. - - lid := locality.ID - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - newLocalitiesSet[lid] = struct{}{} - - newWeight := locality.Weight - var newAddrs []resolver.Address - for _, lbEndpoint := range locality.Endpoints { - // Filter out all "unhealthy" endpoints (unknown and - // healthy are both considered to be healthy: - // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). - if lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && - lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { - continue - } - - address := resolver.Address{ - Addr: lbEndpoint.Address, - } - if edsImpl.subBalancerBuilder.Name() == weightedroundrobin.Name && lbEndpoint.Weight != 0 { - ai := weightedroundrobin.AddrInfo{Weight: lbEndpoint.Weight} - address = weightedroundrobin.SetAddrInfo(address, ai) - // Metadata field in resolver.Address is deprecated. The - // attributes field should be used to specify arbitrary - // attributes about the address. We still need to populate the - // Metadata field here to allow users of this field to migrate - // to the new one. - // TODO(easwars): Remove this once all users have migrated. - // See https://github.com/grpc/grpc-go/issues/3563. - address.Metadata = &ai - } - newAddrs = append(newAddrs, address) - } - var weightChanged, addrsChanged bool - config, ok := bgwc.configs[lid] - if !ok { - // A new balancer, add it to balancer group and balancer map. - bgwc.stateAggregator.Add(lidJSON, newWeight) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - config = &localityConfig{ - weight: newWeight, - } - bgwc.configs[lid] = config - - // weightChanged is false for new locality, because there's no need - // to update weight in bg. - addrsChanged = true - edsImpl.logger.Infof("New locality %v added", lid) - } else { - // Compare weight and addrs. 
- if config.weight != newWeight { - weightChanged = true - } - if !cmp.Equal(config.addrs, newAddrs) { - addrsChanged = true - } - edsImpl.logger.Infof("Locality %v updated, weightedChanged: %v, addrsChanged: %v", lid, weightChanged, addrsChanged) - } - - if weightChanged { - config.weight = newWeight - bgwc.stateAggregator.UpdateWeight(lidJSON, newWeight) - rebuildStateAndPicker = true - } - - if addrsChanged { - config.addrs = newAddrs - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: newAddrs}, - }) - } - } - - // Delete localities that are removed in the latest response. - for lid := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - if _, ok := newLocalitiesSet[lid]; !ok { - bgwc.stateAggregator.Remove(lidJSON) - bgwc.bg.Remove(lidJSON) - delete(bgwc.configs, lid) - edsImpl.logger.Infof("Locality %v deleted", lid) - rebuildStateAndPicker = true - } - } - - if rebuildStateAndPicker { - bgwc.stateAggregator.BuildAndUpdate() - } -} - -// handleSubConnStateChange handles the state change and update pickers accordingly. -func (edsImpl *edsBalancerImpl) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - edsImpl.subConnMu.Lock() - var bgwc *balancerGroupWithConfig - if p, ok := edsImpl.subConnToPriority[sc]; ok { - if s == connectivity.Shutdown { - // Only delete sc from the map when state changed to Shutdown. - delete(edsImpl.subConnToPriority, sc) - } - bgwc = edsImpl.priorityToLocalities[p] - } - edsImpl.subConnMu.Unlock() - if bgwc == nil { - edsImpl.logger.Infof("edsBalancerImpl: priority not found for sc state change") - return - } - if bg := bgwc.bg; bg != nil { - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) - } -} - -// updateServiceRequestsConfig handles changes to the circuit breaking configuration. -func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, max *uint32) { - if !env.CircuitBreakingSupport { - return - } - edsImpl.pickerMu.Lock() - var updatePicker bool - if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName { - edsImpl.serviceRequestsCounter = client.GetServiceRequestsCounter(serviceName) - updatePicker = true - } - - var newMax uint32 = defaultServiceRequestCountMax - if max != nil { - newMax = *max - } - if edsImpl.serviceRequestCountMax != newMax { - edsImpl.serviceRequestCountMax = newMax - updatePicker = true - } - if updatePicker && edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new counter and counterMax. - edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -func (edsImpl *edsBalancerImpl) updateClusterName(name string) { - edsImpl.clusterNameMu.Lock() - defer edsImpl.clusterNameMu.Unlock() - edsImpl.clusterName = name -} - -func (edsImpl *edsBalancerImpl) getClusterName() string { - edsImpl.clusterNameMu.Lock() - defer edsImpl.clusterNameMu.Unlock() - return edsImpl.clusterName -} - -// updateState first handles priority, and then wraps picker in a drop picker -// before forwarding the update. 
-func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.State) { - _, ok := edsImpl.priorityToLocalities[priority] - if !ok { - edsImpl.logger.Infof("eds: received picker update from unknown priority") - return - } - - if edsImpl.handlePriorityWithNewState(priority, s) { - edsImpl.pickerMu.Lock() - defer edsImpl.pickerMu.Unlock() - edsImpl.innerState = s - // Don't reset drops when it's a state change. - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}) - } -} - -func (edsImpl *edsBalancerImpl) ccWrapperWithPriority(priority priorityType) *edsBalancerWrapperCC { - return &edsBalancerWrapperCC{ - ClientConn: edsImpl.cc, - priority: priority, - parent: edsImpl, - } -} - -// edsBalancerWrapperCC implements the balancer.ClientConn API and get passed to -// each balancer group. It contains the locality priority. -type edsBalancerWrapperCC struct { - balancer.ClientConn - priority priorityType - parent *edsBalancerImpl -} - -func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - clusterName := ebwcc.parent.getClusterName() - newAddrs := make([]resolver.Address, len(addrs)) - for i, addr := range addrs { - newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) - } - return ebwcc.parent.newSubConn(ebwcc.priority, newAddrs, opts) -} - -func (ebwcc *edsBalancerWrapperCC) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - clusterName := ebwcc.parent.getClusterName() - newAddrs := make([]resolver.Address, len(addrs)) - for i, addr := range addrs { - newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) - } - ebwcc.ClientConn.UpdateAddresses(sc, newAddrs) -} - -func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) { - ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state) -} - -func (edsImpl *edsBalancerImpl) newSubConn(priority priorityType, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - sc, err := edsImpl.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - edsImpl.subConnMu.Lock() - edsImpl.subConnToPriority[sc] = priority - edsImpl.subConnMu.Unlock() - return sc, nil -} - -// close closes the balancer. 
-func (edsImpl *edsBalancerImpl) close() { - for _, bgwc := range edsImpl.priorityToLocalities { - if bg := bgwc.bg; bg != nil { - bgwc.stateAggregator.Stop() - bg.Close() - } - } -} - -type dropPicker struct { - drops []*dropper - p balancer.Picker - loadStore load.PerClusterReporter - counter *client.ServiceRequestsCounter - countMax uint32 -} - -func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *client.ServiceRequestsCounter, countMax uint32) *dropPicker { - return &dropPicker{ - drops: drops, - p: p, - loadStore: loadStore, - counter: counter, - countMax: countMax, - } -} - -func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - var ( - drop bool - category string - ) - for _, dp := range d.drops { - if dp.drop() { - drop = true - category = dp.c.Category - break - } - } - if drop { - if d.loadStore != nil { - d.loadStore.CallDropped(category) - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") - } - if d.counter != nil { - if err := d.counter.StartRequest(d.countMax); err != nil { - // Drops by circuit breaking are reported with empty category. They - // will be reported only in total drops, but not in per category. - if d.loadStore != nil { - d.loadStore.CallDropped("") - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) - } - pr, err := d.p.Pick(info) - if err != nil { - d.counter.EndRequest() - return pr, err - } - oldDone := pr.Done - pr.Done = func(doneInfo balancer.DoneInfo) { - d.counter.EndRequest() - if oldDone != nil { - oldDone(doneInfo) - } - } - return pr, err - } - // TODO: (eds) don't drop unless the inner picker is READY. Similar to - // https://github.com/grpc/grpc-go/issues/2622. - return d.p.Pick(info) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl_priority.go deleted file mode 100644 index 53ac6ef5e8..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_impl_priority.go +++ /dev/null @@ -1,358 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "errors" - "fmt" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/connectivity" -) - -var errAllPrioritiesRemoved = errors.New("eds: no locality is provided, all priorities are removed") - -// handlePriorityChange handles priority after EDS adds/removes a -// priority. -// -// - If all priorities were deleted, unset priorityInUse, and set parent -// ClientConn to TransientFailure -// - If priorityInUse wasn't set, this is either the first EDS resp, or the -// previous EDS resp deleted everything. Set priorityInUse to 0, and start 0. 
-// - If priorityInUse was deleted, send the picker from the new lowest priority -// to parent ClientConn, and set priorityInUse to the new lowest. -// - If priorityInUse has a non-Ready state, and also there's a priority lower -// than priorityInUse (which means a lower priority was added), set the next -// priority as new priorityInUse, and start the bg. -func (edsImpl *edsBalancerImpl) handlePriorityChange() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - // Everything was removed by EDS. - if !edsImpl.priorityLowest.isSet() { - edsImpl.priorityInUse = newPriorityTypeUnset() - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - return - } - - // priorityInUse wasn't set, use 0. - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("Switching priority from unset to %v", 0) - edsImpl.startPriority(newPriorityType(0)) - return - } - - // priorityInUse was deleted, use the new lowest. - if _, ok := edsImpl.priorityToLocalities[edsImpl.priorityInUse]; !ok { - oldP := edsImpl.priorityInUse - edsImpl.priorityInUse = edsImpl.priorityLowest - edsImpl.logger.Infof("Switching priority from %v to %v, because former was deleted", oldP, edsImpl.priorityInUse) - if s, ok := edsImpl.priorityToState[edsImpl.priorityLowest]; ok { - edsImpl.cc.UpdateState(*s) - } else { - // If state for priorityLowest is not found, this means priorityLowest was - // started, but never sent any update. The init timer fired and - // triggered the next priority. The old_priorityInUse (that was just - // deleted EDS) was picked later. - // - // We don't have an old state to send to parent, but we also don't - // want parent to keep using picker from old_priorityInUse. Send an - // update to trigger block picks until a new picker is ready. - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) - } - return - } - - // priorityInUse is not ready, look for next priority, and use if found. - if s, ok := edsImpl.priorityToState[edsImpl.priorityInUse]; ok && s.ConnectivityState != connectivity.Ready { - pNext := edsImpl.priorityInUse.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter was added, and former wasn't Ready") - edsImpl.startPriority(pNext) - } - } -} - -// startPriority sets priorityInUse to p, and starts the balancer group for p. -// It also starts a timer to fall to next priority after timeout. -// -// Caller must hold priorityMu, priority must exist, and edsImpl.priorityInUse -// must be non-nil. -func (edsImpl *edsBalancerImpl) startPriority(priority priorityType) { - edsImpl.priorityInUse = priority - p := edsImpl.priorityToLocalities[priority] - // NOTE: this will eventually send addresses to sub-balancers. If the - // sub-balancer tries to update picker, it will result in a deadlock on - // priorityMu in the update is handled synchronously. The deadlock is - // currently avoided by handling balancer update in a goroutine (the run - // goroutine in the parent eds balancer). When priority balancer is split - // into its own, this asynchronous state handling needs to be copied. 
- p.stateAggregator.Start() - p.bg.Start() - // startPriority can be called when - // 1. first EDS resp, start p0 - // 2. a high priority goes Failure, start next - // 3. a high priority init timeout, start next - // - // In all the cases, the existing init timer is either closed, also already - // expired. There's no need to close the old timer. - edsImpl.priorityInitTimer = time.AfterFunc(defaultPriorityInitTimeout, func() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - if !edsImpl.priorityInUse.isSet() || !edsImpl.priorityInUse.equal(priority) { - return - } - edsImpl.priorityInitTimer = nil - pNext := priority.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.startPriority(pNext) - } - }) -} - -// handlePriorityWithNewState start/close priorities based on the connectivity -// state. It returns whether the state should be forwarded to parent ClientConn. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewState(priority priorityType, s balancer.State) bool { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") - return false - } - - if edsImpl.priorityInUse.higherThan(priority) { - // Lower priorities should all be closed, this is an unexpected update. - edsImpl.logger.Infof("eds: received picker update from priority lower then priorityInUse") - return false - } - - bState, ok := edsImpl.priorityToState[priority] - if !ok { - bState = &balancer.State{} - edsImpl.priorityToState[priority] = bState - } - oldState := bState.ConnectivityState - *bState = s - - switch s.ConnectivityState { - case connectivity.Ready: - return edsImpl.handlePriorityWithNewStateReady(priority) - case connectivity.TransientFailure: - return edsImpl.handlePriorityWithNewStateTransientFailure(priority) - case connectivity.Connecting: - return edsImpl.handlePriorityWithNewStateConnecting(priority, oldState) - default: - // New state is Idle, should never happen. Don't forward. - return false - } -} - -// handlePriorityWithNewStateReady handles state Ready and decides whether to -// forward update or not. -// -// An update with state Ready: -// - If it's from higher priority: -// - Forward the update -// - Set the priority as priorityInUse -// - Close all priorities lower than this one -// - If it's from priorityInUse: -// - Forward and do nothing else -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateReady(priority priorityType) bool { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, - // priorityInUse will be closed, and the init timer will become useless. 
- if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - - if edsImpl.priorityInUse.lowerThan(priority) { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter became Ready", edsImpl.priorityInUse, priority) - edsImpl.priorityInUse = priority - for i := priority.nextLower(); !i.lowerThan(edsImpl.priorityLowest); i = i.nextLower() { - bgwc := edsImpl.priorityToLocalities[i] - bgwc.stateAggregator.Stop() - bgwc.bg.Close() - } - return true - } - return true -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure and -// decides whether to forward update or not. -// -// An update with state Failure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Forward -// - Set lower as priorityInUse -// - Start lower -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateTransientFailure(priority priorityType) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - // priorityInUse sends a failure. Stop its init timer. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, pNext) - edsImpl.startPriority(pNext) - return true -} - -// handlePriorityWithNewStateConnecting handles state Connecting and decides -// whether to forward update or not. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorhtim (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. 
-func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateConnecting(priority priorityType, oldState connectivity.State) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - - switch oldState { - case connectivity.Ready: - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became Connecting from Ready", priority, pNext) - edsImpl.startPriority(pNext) - return true - case connectivity.Idle: - return true - case connectivity.TransientFailure: - return false - default: - // Old state is Connecting or Shutdown. Don't forward. - return false - } -} - -// priorityType represents the priority from EDS response. -// -// 0 is the highest priority. The bigger the number, the lower the priority. -type priorityType struct { - set bool - p uint32 -} - -func newPriorityType(p uint32) priorityType { - return priorityType{ - set: true, - p: p, - } -} - -func newPriorityTypeUnset() priorityType { - return priorityType{} -} - -func (p priorityType) isSet() bool { - return p.set -} - -func (p priorityType) equal(p2 priorityType) bool { - if !p.isSet() && !p2.isSet() { - return true - } - if !p.isSet() || !p2.isSet() { - return false - } - return p == p2 -} - -func (p priorityType) higherThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. - panic("priority unset") - } - return p.p < p2.p -} - -func (p priorityType) lowerThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. - panic("priority unset") - } - return p.p > p2.p -} - -func (p priorityType) nextLower() priorityType { - if !p.isSet() { - panic("priority unset") - } - return priorityType{ - set: true, - p: p.p + 1, - } -} - -func (p priorityType) String() string { - if !p.set { - return "Nil" - } - return fmt.Sprint(p.p) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_testutil.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_testutil.go deleted file mode 100644 index 5e37cdcb47..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/eds_testutil.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "fmt" - "net" - "strconv" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - typepb "github.com/envoyproxy/go-control-plane/envoy/type" - "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -// parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. -// -// TODO: delete this. 
The EDS balancer tests should build an EndpointsUpdate -// directly, instead of building and parsing a proto message. -func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.EndpointsUpdate { - u, err := parseEDSRespProto(m) - if err != nil { - panic(err.Error()) - } - return u -} - -// parseEDSRespProto turns EDS response proto message to EndpointsUpdate. -func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdate, error) { - ret := xdsclient.EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, xdsclient.Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -func parseAddress(socketAddress *corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsclient.OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case typepb.FractionalPercent_HUNDRED: - denominator = 100 - case typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return xdsclient.OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint { - endpoints := make([]xdsclient.Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, xdsclient.Endpoint{ - HealthStatus: xdsclient.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/load_store_wrapper.go deleted file mode 100644 index 18904e47a4..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/load_store_wrapper.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package edsbalancer - -import ( - "sync" - - "google.golang.org/grpc/xds/internal/client/load" -) - -type loadStoreWrapper struct { - mu sync.RWMutex - service string - // Both store and perCluster will be nil if load reporting is disabled (EDS - // response doesn't have LRS server name). Note that methods on Store and - // perCluster all handle nil, so there's no need to check nil before calling - // them. - store *load.Store - perCluster load.PerClusterReporter -} - -func (lsw *loadStoreWrapper) updateServiceName(service string) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if lsw.service == service { - return - } - lsw.service = service - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) updateLoadStore(store *load.Store) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if store == lsw.store { - return - } - lsw.store = store - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) CallStarted(locality string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallStarted(locality) - } -} - -func (lsw *loadStoreWrapper) CallFinished(locality string, err error) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallFinished(locality, err) - } -} - -func (lsw *loadStoreWrapper) CallServerLoad(locality, name string, val float64) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallServerLoad(locality, name, val) - } -} - -func (lsw *loadStoreWrapper) CallDropped(category string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallDropped(category) - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/util.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/util.go deleted file mode 100644 index 1329504264..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/util.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package edsbalancer - -import ( - "google.golang.org/grpc/internal/wrr" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -var newRandomWRR = wrr.NewRandom - -type dropper struct { - c xdsclient.OverloadDropConfig - w wrr.WRR -} - -func newDropper(c xdsclient.OverloadDropConfig) *dropper { - w := newRandomWRR() - w.Add(true, int64(c.Numerator)) - w.Add(false, int64(c.Denominator-c.Numerator)) - - return &dropper{ - c: c, - w: w, - } -} - -func (d *dropper) drop() (ret bool) { - return d.w.Next().(bool) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/xds_old.go b/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/xds_old.go deleted file mode 100644 index 6729e6801f..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/edsbalancer/xds_old.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import "google.golang.org/grpc/balancer" - -// The old xds balancer implements logic for both CDS and EDS. With the new -// design, CDS is split and moved to a separate balancer, and the xds balancer -// becomes the EDS balancer. -// -// To keep the existing tests working, this file regisger EDS balancer under the -// old xds balancer name. -// -// TODO: delete this file when migration to new workflow (LDS, RDS, CDS, EDS) is -// done. - -const xdsName = "xds_experimental" - -func init() { - balancer.Register(&xdsBalancerBuilder{}) -} - -// xdsBalancerBuilder register edsBalancerBuilder (now with name -// "eds_experimental") under the old name "xds_experimental". -type xdsBalancerBuilder struct { - edsBalancerBuilder -} - -func (b *xdsBalancerBuilder) Name() string { - return xdsName -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go new file mode 100644 index 0000000000..8ce958d71c --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package loadstore contains the loadStoreWrapper shared by the balancers. +package loadstore + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +// NewWrapper creates a Wrapper. 
+func NewWrapper() *Wrapper {
+ return &Wrapper{}
+}
+
+// Wrapper wraps a load store with cluster and edsService.
+//
+// Its store and cluster/edsService can be updated separately, and it will
+// update its internal perCluster store so that new stats will be added to the
+// correct perCluster.
+//
+// Note that this struct is a temporary workaround before we implement graceful
+// switch for EDS. Any update to the clusterName and serviceName is too early;
+// the perfect timing is when the picker is updated with the new connection.
+// This early update could cause picks for the old SubConn to be reported to
+// the new services.
+//
+// When the graceful switch in EDS is done, there should be no need for this
+// struct. The policies that record/report load shouldn't need to handle update
+// of lrsServerName/cluster/edsService. Its parent should do a graceful switch
+// of the whole tree when any of those changes.
+type Wrapper struct {
+ mu sync.RWMutex
+ cluster string
+ edsService string
+ // store and perCluster are initialized as nil. They are only set by the
+ // balancer when LRS is enabled. Before that, all functions to record loads
+ // are no-op.
+ store *load.Store
+ perCluster load.PerClusterReporter
+}
+
+// UpdateClusterAndService updates the cluster name and eds service for this
+// wrapper. If any one of them is changed from before, the perCluster store in
+// this wrapper will also be updated.
+func (lsw *Wrapper) UpdateClusterAndService(cluster, edsService string) {
+ lsw.mu.Lock()
+ defer lsw.mu.Unlock()
+ if cluster == lsw.cluster && edsService == lsw.edsService {
+ return
+ }
+ lsw.cluster = cluster
+ lsw.edsService = edsService
+ lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService)
+}
+
+// UpdateLoadStore updates the load store for this wrapper. If it is changed
+// from before, the perCluster store in this wrapper will also be updated.
+func (lsw *Wrapper) UpdateLoadStore(store *load.Store) {
+ lsw.mu.Lock()
+ defer lsw.mu.Unlock()
+ if store == lsw.store {
+ return
+ }
+ lsw.store = store
+ lsw.perCluster = lsw.store.PerCluster(lsw.cluster, lsw.edsService)
+}
+
+// CallStarted records a call started in the store.
+func (lsw *Wrapper) CallStarted(locality string) {
+ lsw.mu.RLock()
+ defer lsw.mu.RUnlock()
+ if lsw.perCluster != nil {
+ lsw.perCluster.CallStarted(locality)
+ }
+}
+
+// CallFinished records a call finished in the store.
+func (lsw *Wrapper) CallFinished(locality string, err error) {
+ lsw.mu.RLock()
+ defer lsw.mu.RUnlock()
+ if lsw.perCluster != nil {
+ lsw.perCluster.CallFinished(locality, err)
+ }
+}
+
+// CallServerLoad records the server load in the store.
+func (lsw *Wrapper) CallServerLoad(locality, name string, val float64) {
+ lsw.mu.RLock()
+ defer lsw.mu.RUnlock()
+ if lsw.perCluster != nil {
+ lsw.perCluster.CallServerLoad(locality, name, val)
+ }
+}
+
+// CallDropped records a call dropped in the store.
+func (lsw *Wrapper) CallDropped(category string) {
+ lsw.mu.RLock()
+ defer lsw.mu.RUnlock()
+ if lsw.perCluster != nil {
+ lsw.perCluster.CallDropped(category)
+ }
+}
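To make the intended call pattern of the wrapper concrete, here is a hypothetical caller; this is an illustrative sketch, not part of the vendored code. myBalancer, onConfigUpdate, and onRPC are invented names, and since loadstore and load are gRPC-internal packages, this only compiles inside the grpc module.

// Hypothetical in-tree usage of loadstore.Wrapper (illustrative only).
package example

import (
	"google.golang.org/grpc/xds/internal/balancer/loadstore"
	"google.golang.org/grpc/xds/internal/xdsclient/load"
)

type myBalancer struct {
	loadWrapper *loadstore.Wrapper
}

// onConfigUpdate re-points the wrapper at the current cluster/service and
// store; until a store is set, the recording methods below are no-ops.
func (b *myBalancer) onConfigUpdate(cluster, edsService string, store *load.Store) {
	b.loadWrapper.UpdateClusterAndService(cluster, edsService)
	b.loadWrapper.UpdateLoadStore(store)
}

// onRPC records per-locality call stats; after an update above, new stats
// are added to the new perCluster reporter.
func (b *myBalancer) onRPC(locality string) {
	b.loadWrapper.CallStarted(locality)
	b.loadWrapper.CallFinished(locality, nil)
}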
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go
new file mode 100644
index 0000000000..23e8aa7750
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go
@@ -0,0 +1,253 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package priority implements the priority balancer.
+//
+// This balancer will be kept in internal until we use it in the xds balancers,
+// and are confident its functionalities are stable. It will then be exported
+// for more users.
+package priority
+
+import (
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/internal/buffer"
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/hierarchy"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+ "google.golang.org/grpc/xds/internal/balancer/balancergroup"
+)
+
+// Name is the name of the priority balancer.
+const Name = "priority_experimental"
+
+func init() {
+ balancer.Register(bb{})
+}
+
+type bb struct{}
+
+func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer {
+ b := &priorityBalancer{
+ cc: cc,
+ done: grpcsync.NewEvent(),
+ childToPriority: make(map[string]int),
+ children: make(map[string]*childBalancer),
+ childBalancerStateUpdate: buffer.NewUnbounded(),
+ }
+
+ b.logger = prefixLogger(b)
+ b.bg = balancergroup.New(cc, bOpts, b, nil, b.logger)
+ b.bg.Start()
+ go b.run()
+ b.logger.Infof("Created")
+ return b
+}
+
+func (b bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ return parseConfig(s)
+}
+
+func (bb) Name() string {
+ return Name
+}
+
+// timerWrapper wraps a timer with a boolean, so that when a race happens
+// between AfterFunc and Stop, the func is guaranteed not to execute.
+type timerWrapper struct {
+ stopped bool
+ timer *time.Timer
+}
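Why the boolean is needed: time.Timer.Stop does not guarantee that an AfterFunc callback which has already been scheduled won't run, so the callback must re-check a flag under the same lock the stopper holds. A self-contained sketch of the pattern (illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"sync"
	"time"
)

type stoppableTimer struct {
	stopped bool
	timer   *time.Timer
}

func main() {
	var mu sync.Mutex
	tw := &stoppableTimer{}
	tw.timer = time.AfterFunc(50*time.Millisecond, func() {
		mu.Lock()
		defer mu.Unlock()
		// Even if Stop() lost the race with the runtime firing the timer,
		// the flag below makes the callback a no-op.
		if tw.stopped {
			return
		}
		fmt.Println("timer fired")
	})

	mu.Lock()
	tw.stopped = true
	tw.timer.Stop() // may return false; the flag still protects us
	mu.Unlock()

	time.Sleep(100 * time.Millisecond) // nothing is printed
}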
+
+type priorityBalancer struct {
+ logger *grpclog.PrefixLogger
+ cc balancer.ClientConn
+ bg *balancergroup.BalancerGroup
+ done *grpcsync.Event
+ childBalancerStateUpdate *buffer.Unbounded
+
+ mu sync.Mutex
+ childInUse string
+ // Priority of the child that's currently in use. An int starting from 0,
+ // where 0 is the highest priority.
+ priorityInUse int
+ // priorities is a list of child names from higher to lower priority.
+ priorities []string
+ // childToPriority is a map from the child name to its priority. Priority
+ // is an int starting from 0, where 0 is the highest priority.
+ childToPriority map[string]int
+ // children is a map from child name to sub-balancers.
+ children map[string]*childBalancer
+ // The timer gives a priority some time to connect. If the priority
+ // doesn't go into Ready/Failure before the timer fires, the next priority
+ // will be started.
+ //
+ // One timer is enough because there can be at most one priority in init
+ // state.
+ priorityInitTimer *timerWrapper
+}
+
+func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+ b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
+ newConfig, ok := s.BalancerConfig.(*LBConfig)
+ if !ok {
+ return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
+ }
+ addressesSplit := hierarchy.Group(s.ResolverState.Addresses)
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Create and remove children, since we know all children from the config
+ // are used by some priority.
+ for name, newSubConfig := range newConfig.Children {
+ bb := balancer.Get(newSubConfig.Config.Name)
+ if bb == nil {
+ b.logger.Errorf("balancer name %v from config is not registered", newSubConfig.Config.Name)
+ continue
+ }
+
+ currentChild, ok := b.children[name]
+ if !ok {
+ // This is a new child, add it to the children list. But note that
+ // the balancer isn't built, because this child can be a low
+ // priority. If necessary, it will be built when syncing priorities.
+ cb := newChildBalancer(name, b, bb)
+ cb.updateConfig(newSubConfig, resolver.State{
+ Addresses: addressesSplit[name],
+ ServiceConfig: s.ResolverState.ServiceConfig,
+ Attributes: s.ResolverState.Attributes,
+ })
+ b.children[name] = cb
+ continue
+ }
+
+ // This is not a new child. But the config/addresses could change.
+
+ // The balancing policy name changed; close the old child. But don't
+ // rebuild here, rebuilding will happen when syncing priorities.
+ if currentChild.bb.Name() != bb.Name() {
+ currentChild.stop()
+ currentChild.updateBuilder(bb)
+ }
+
+ // Update config and address, but note that this doesn't send the
+ // updates to the child balancer (the child balancer might not be
+ // built, if it's a low priority).
+ currentChild.updateConfig(newSubConfig, resolver.State{
+ Addresses: addressesSplit[name],
+ ServiceConfig: s.ResolverState.ServiceConfig,
+ Attributes: s.ResolverState.Attributes,
+ })
+ }
+
+ // Remove child from children if it's not in the new config.
+ for name, oldChild := range b.children {
+ if _, ok := newConfig.Children[name]; !ok {
+ oldChild.stop()
+ }
+ }
+
+ // Update priorities and handle priority changes.
+ b.priorities = newConfig.Priorities
+ b.childToPriority = make(map[string]int, len(newConfig.Priorities))
+ for pi, pName := range newConfig.Priorities {
+ b.childToPriority[pName] = pi
+ }
+ // Sync the states of all children to the new updated priorities. This
+ // includes starting/stopping child balancers when necessary.
+ b.syncPriority()
+
+ return nil
+}
+
+func (b *priorityBalancer) ResolverError(err error) {
+ b.bg.ResolverError(err)
+}
+
+func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+ b.bg.UpdateSubConnState(sc, state)
+}
+
+func (b *priorityBalancer) Close() {
+ b.bg.Close()
+
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.done.Fire()
+ // Clear states of the current child in use, so if there's a race in picker
+ // update, it will be dropped.
+ b.childInUse = ""
+ b.stopPriorityInitTimer()
+}
+
+func (b *priorityBalancer) ExitIdle() {
+ b.bg.ExitIdle()
+}
+
+// stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and sets
+// it to nil.
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) stopPriorityInitTimer() {
+ timerW := b.priorityInitTimer
+ if timerW == nil {
+ return
+ }
+ b.priorityInitTimer = nil
+ timerW.stopped = true
+ timerW.timer.Stop()
+}
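UpdateState below never calls into the balancer synchronously; it enqueues the update for the run goroutine (further below) to drain. That decoupling is what prevents a child that reports state inline, while locks are held, from deadlocking the parent. A standard-library sketch of the same producer/consumer pump (illustrative only; in the real code gRPC's internal buffer.Unbounded plays this role):

package main

import (
	"fmt"
	"sync"
)

// pump decouples producers from a single consumer goroutine, so producers
// never block while holding their own locks.
type pump struct {
	mu    sync.Mutex
	queue []string
	ch    chan struct{}
}

func newPump() *pump { return &pump{ch: make(chan struct{}, 1)} }

func (p *pump) put(s string) {
	p.mu.Lock()
	p.queue = append(p.queue, s)
	p.mu.Unlock()
	select {
	case p.ch <- struct{}{}: // signal the consumer; never blocks
	default:
	}
}

func (p *pump) run(handle func(string)) {
	for range p.ch {
		for {
			p.mu.Lock()
			if len(p.queue) == 0 {
				p.mu.Unlock()
				break
			}
			s := p.queue[0]
			p.queue = p.queue[1:]
			p.mu.Unlock()
			handle(s) // called without holding p.mu
		}
	}
}

func main() {
	p := newPump()
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		p.run(func(s string) { fmt.Println("handled:", s) })
	}()
	p.put("state update from child")
	close(p.ch) // demo only: lets run() return after draining
	wg.Wait()
}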
The +// balancer group sends new connectivity state and picker here. +func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { + b.childBalancerStateUpdate.Put(&childBalancerState{ + name: childName, + s: state, + }) +} + +type childBalancerState struct { + name string + s balancer.State +} + +// run handles child update in a separate goroutine, so if the child sends +// updates inline (when called by parent), it won't cause deadlocks (by trying +// to hold the same mutex). +func (b *priorityBalancer) run() { + for { + select { + case u := <-b.childBalancerStateUpdate.Get(): + b.childBalancerStateUpdate.Load() + s := u.(*childBalancerState) + // Needs to handle state update in a goroutine, because each state + // update needs to start/close child policy, could result in + // deadlock. + b.handleChildStateUpdate(s.name, s.s) + case <-b.done.Done(): + return + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go new file mode 100644 index 0000000000..600705da01 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_child.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +type childBalancer struct { + name string + parent *priorityBalancer + bb *ignoreResolveNowBalancerBuilder + + ignoreReresolutionRequests bool + config serviceconfig.LoadBalancingConfig + rState resolver.State + + started bool + state balancer.State +} + +// newChildBalancer creates a child balancer place holder, but doesn't +// build/start the child balancer. +func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder) *childBalancer { + return &childBalancer{ + name: name, + parent: parent, + bb: newIgnoreResolveNowBalancerBuilder(bb, false), + started: false, + // Start with the connecting state and picker with re-pick error, so + // that when a priority switch causes this child picked before it's + // balancing policy is created, a re-pick will happen. + state: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + } +} + +// updateBuilder updates builder for the child, but doesn't build. +func (cb *childBalancer) updateBuilder(bb balancer.Builder) { + cb.bb = newIgnoreResolveNowBalancerBuilder(bb, cb.ignoreReresolutionRequests) +} + +// updateConfig sets childBalancer's config and state, but doesn't send update to +// the child balancer. 
+
+// updateConfig sets childBalancer's config and state, but doesn't send an
+// update to the child balancer.
+func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) {
+ cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests
+ cb.config = child.Config.Config
+ cb.rState = rState
+}
+
+// start builds the child balancer if it's not already started.
+//
+// It doesn't do it directly. It asks the balancer group to build it.
+func (cb *childBalancer) start() {
+ if cb.started {
+ return
+ }
+ cb.started = true
+ cb.parent.bg.Add(cb.name, cb.bb)
+}
+
+// sendUpdate sends the addresses and config to the child balancer.
+func (cb *childBalancer) sendUpdate() {
+ cb.bb.updateIgnoreResolveNow(cb.ignoreReresolutionRequests)
+ // TODO: return and aggregate the returned error in the parent.
+ err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{
+ ResolverState: cb.rState,
+ BalancerConfig: cb.config,
+ })
+ if err != nil {
+ cb.parent.logger.Warningf("failed to update ClientConn state for child %v: %v", cb.name, err)
+ }
+}
+
+// stop stops the child balancer and resets the state.
+//
+// It doesn't do it directly. It asks the balancer group to remove it.
+//
+// Note that the underlying balancer group could keep the child in a cache.
+func (cb *childBalancer) stop() {
+ if !cb.started {
+ return
+ }
+ cb.parent.bg.Remove(cb.name)
+ cb.started = false
+ cb.state = balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+ }
+}
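The file added below exports DefaultPriorityInitTimeout specifically so tests can shorten the ten-second failover window. A sketch of how an in-package test might do that (illustrative only; the test name is invented and the assertion body is elided):

// Illustrative in-package test sketch (not vendored code).
func TestInitTimeoutFailover(t *testing.T) {
	defer func(old time.Duration) { DefaultPriorityInitTimeout = old }(DefaultPriorityInitTimeout)
	DefaultPriorityInitTimeout = 100 * time.Millisecond

	// Start a priority that never reports READY, then assert that the
	// balancer switches to the next priority after roughly 100ms.
}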
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go
new file mode 100644
index 0000000000..bd2c6724ea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go
@@ -0,0 +1,361 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package priority
+
+import (
+ "errors"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+)
+
+var (
+ // ErrAllPrioritiesRemoved is returned by the picker when there's no priority available.
+ ErrAllPrioritiesRemoved = errors.New("no priority is provided, all priorities are removed")
+ // DefaultPriorityInitTimeout is the timeout after which if a priority is
+ // not READY, the next will be started. It's exported to be overridden by
+ // tests.
+ DefaultPriorityInitTimeout = 10 * time.Second
+)
+
+// syncPriority handles priority after a config update. It makes sure the
+// balancer state (started or not) is in sync with the priorities (even in
+// tricky cases where a child is moved from one priority to another).
+//
+// It's guaranteed that after this function returns:
+// - If some child is READY, it is childInUse, and all lower priorities are
+// closed.
+// - If some child is newly started (in Connecting for the first time), it is
+// childInUse, and all lower priorities are closed.
+// - Otherwise, the lowest priority is childInUse (none of the children is
+// ready, and the overall state is not ready).
+//
+// Steps:
+// - If all priorities were deleted, unset childInUse (to an empty string), and
+// set parent ClientConn to TransientFailure
+// - Otherwise, scan all children from p0, and check balancer state:
+// - For any of the following cases:
+// - If balancer is not started (not built), this is either a new child
+// with high priority, or a new builder for an existing child.
+// - If balancer is READY
+// - If this is the lowest priority
+// - do the following:
+// - if this is not the old childInUse, override picker so the old picker is
+// no longer used.
+// - switch to it (because all higher priorities are neither new nor Ready)
+// - forward the new addresses and config
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) syncPriority() {
+ // Everything was removed by the update.
+ if len(b.priorities) == 0 {
+ b.childInUse = ""
+ b.priorityInUse = 0
+ // Stop the init timer. This can happen if the only priority is removed
+ // shortly after it's added.
+ b.stopPriorityInitTimer()
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: base.NewErrPicker(ErrAllPrioritiesRemoved),
+ })
+ return
+ }
+
+ for p, name := range b.priorities {
+ child, ok := b.children[name]
+ if !ok {
+ b.logger.Errorf("child with name %q is not found in children", name)
+ continue
+ }
+
+ if !child.started ||
+ child.state.ConnectivityState == connectivity.Ready ||
+ p == len(b.priorities)-1 {
+ if b.childInUse != "" && b.childInUse != child.name {
+ // childInUse was set and is different from this child, will
+ // change childInUse later. We need to update picker here
+ // immediately so parent stops using the old picker.
+ b.cc.UpdateState(child.state)
+ }
+ b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p)
+ b.switchToChild(child, p)
+ child.sendUpdate()
+ break
+ }
+ }
+}
+
+// Stop priorities [p+1, lowest].
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) {
+ for i := p + 1; i < len(b.priorities); i++ {
+ name := b.priorities[i]
+ child, ok := b.children[name]
+ if !ok {
+ b.logger.Errorf("child with name %q is not found in children", name)
+ continue
+ }
+ child.stop()
+ }
+}
+
+// switchToChild does the following:
+// - stop all children with lower priorities
+// - if childInUse is not this child
+// - set childInUse to this child
+// - stop the init timer
+// - if this child is not started, start it, and start an init timer
+//
+// Note that it does NOT send the current child state (picker) to the parent
+// ClientConn. The caller needs to send it if necessary.
+//
+// This can be called when
+// 1. first update, start p0
+// 2. an update moves a READY child from a lower priority to a higher one
+// 3. a different builder is updated for this child
+// 4. a high priority goes Failure, start next
+// 5. a high priority init timeout, start next
+//
+// Caller must hold b.mu.
+func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) {
+ // Stop lower priorities even if childInUse is same as this child. It's
+ // possible this child was moved from one priority to another.
+ b.stopSubBalancersLowerThanPriority(priority)
+
+ // If this child is already in use, do nothing.
+ //
+ // This can happen:
+ // - all priorities are not READY, a config update always triggers switch
+ // to the lowest.
+ // In this case, the lowest child could still be connecting, so we don't
+ // stop the init timer.
+ // - a high priority is READY, a config update always triggers switch to
+ // it.
+ if b.childInUse == child.name && child.started {
+ return
+ }
+ b.childInUse = child.name
+ b.priorityInUse = priority
+
+ // Init timer is always for childInUse. Since we are switching to a
+ // different child, we will stop the init timer no matter what. If this
+ // child is not started, we will start the init timer later.
+ b.stopPriorityInitTimer()
+
+ if !child.started {
+ child.start()
+ // Need this local variable to capture timerW in the AfterFunc closure
+ // to check the stopped boolean.
+ timerW := &timerWrapper{}
+ b.priorityInitTimer = timerW
+ timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if timerW.stopped {
+ return
+ }
+ b.priorityInitTimer = nil
+ // Switch to the next priority if there's any.
+ if pNext := priority + 1; pNext < len(b.priorities) {
+ nameNext := b.priorities[pNext]
+ if childNext, ok := b.children[nameNext]; ok {
+ b.switchToChild(childNext, pNext)
+ childNext.sendUpdate()
+ }
+ }
+ })
+ }
+}
+
+// handleChildStateUpdate starts/closes priorities based on the connectivity
+// state.
+func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.done.HasFired() {
+ return
+ }
+
+ priority, ok := b.childToPriority[childName]
+ if !ok {
+ b.logger.Errorf("priority: received picker update with unknown child %v", childName)
+ return
+ }
+
+ if b.childInUse == "" {
+ b.logger.Errorf("priority: no child is in use when picker update is received")
+ return
+ }
+
+ // priorityInUse is higher than this priority.
+ if b.priorityInUse < priority {
+ // Lower priorities should all be closed, this is an unexpected update.
+ // Can happen if the child policy sends an update after we tell it to
+ // close.
+ b.logger.Warningf("priority: received picker update from priority %v, lower than priority in use %v", priority, b.priorityInUse)
+ return
+ }
+
+ // Update state in child. The updated picker will be sent to parent later if
+ // necessary.
+ child, ok := b.children[childName]
+ if !ok {
+ b.logger.Errorf("priority: child balancer not found for child %v, priority %v", childName, priority)
+ return
+ }
+ oldState := child.state.ConnectivityState
+ child.state = s
+
+ switch s.ConnectivityState {
+ case connectivity.Ready, connectivity.Idle:
+ // Note that idle is also handled as if it's Ready. It will close the
+ // lower priorities (which will be kept in a cache, not deleted), and
+ // new picks will use the Idle picker.
+ b.handlePriorityWithNewStateReady(child, priority)
+ case connectivity.TransientFailure:
+ b.handlePriorityWithNewStateTransientFailure(child, priority)
+ case connectivity.Connecting:
+ b.handlePriorityWithNewStateConnecting(child, priority, oldState)
+ default:
+ // New state is Shutdown, should never happen. Don't forward.
+ }
+}
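The comparisons above can read inverted at first glance because a smaller integer means a higher priority: `b.priorityInUse < priority` is an update from a child ranked below the one in use. A runnable illustration of the ordering convention (names invented):

package main

import "fmt"

func main() {
	priorities := []string{"p0", "p1", "p2"} // index is the priority; 0 is highest
	priorityInUse := 1                       // "p1" is currently in use

	for p, name := range priorities {
		switch {
		case p < priorityInUse:
			fmt.Printf("%s: higher than in-use; Ready here would take over\n", name)
		case p == priorityInUse:
			fmt.Printf("%s: in use; its state drives forward/failover\n", name)
		default:
			fmt.Printf("%s: lower than in-use; updates are unexpected\n", name)
		}
	}
}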
+
+// handlePriorityWithNewStateReady handles state Ready from a higher or equal
+// priority.
+//
+// An update with state Ready:
+// - If it's from higher priority:
+// - Switch to this priority
+// - Forward the update
+// - If it's from priorityInUse:
+// - Forward only
+//
+// Caller must make sure priorityInUse is not higher than priority.
+//
+// Caller must hold mu.
+func (b *priorityBalancer) handlePriorityWithNewStateReady(child *childBalancer, priority int) {
+ // If one priority higher or equal to priorityInUse goes Ready, stop the
+ // init timer. If update is from higher than priorityInUse, priorityInUse
+ // will be closed, and the init timer will become useless.
+ b.stopPriorityInitTimer()
+
+ // priorityInUse is lower than this priority, switch to this.
+ if b.priorityInUse > priority {
+ b.logger.Infof("Switching priority from %v to %v, because latter became Ready", b.priorityInUse, priority)
+ b.switchToChild(child, priority)
+ }
+ // Forward the update since it's READY.
+ b.cc.UpdateState(child.state)
+}
+
+// handlePriorityWithNewStateTransientFailure handles state TransientFailure
+// from a higher or equal priority.
+//
+// An update with state TransientFailure:
+// - If it's from a higher priority:
+// - Do not forward, and do nothing
+// - If it's from priorityInUse:
+// - If there's no lower:
+// - Forward and do nothing else
+// - If there's a lower priority:
+// - Switch to the lower
+// - Forward the lower child's state
+// - Do NOT forward this update
+//
+// Caller must make sure priorityInUse is not higher than priority.
+//
+// Caller must hold mu.
+func (b *priorityBalancer) handlePriorityWithNewStateTransientFailure(child *childBalancer, priority int) {
+ // priorityInUse is lower than this priority, do nothing.
+ if b.priorityInUse > priority {
+ return
+ }
+ // priorityInUse sends a failure. Stop its init timer.
+ b.stopPriorityInitTimer()
+ priorityNext := priority + 1
+ if priorityNext >= len(b.priorities) {
+ // Forward this update.
+ b.cc.UpdateState(child.state)
+ return
+ }
+ b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext)
+ nameNext := b.priorities[priorityNext]
+ childNext := b.children[nameNext]
+ b.switchToChild(childNext, priorityNext)
+ b.cc.UpdateState(childNext.state)
+ childNext.sendUpdate()
+}
+
+// handlePriorityWithNewStateConnecting handles state Connecting from a higher
+// or equal priority.
+//
+// An update with state Connecting:
+// - If it's from a higher priority
+// - Do nothing
+// - If it's from priorityInUse, the behavior depends on previous state.
+//
+// When new state is Connecting, the behavior depends on previous state. If the
+// previous state was Ready, this is a transition out from Ready to Connecting.
+// Assuming there are multiple backends in the same priority, this means we are
+// in a bad situation and we should failover to the next priority (Side note:
+// the current connectivity state aggregating algorithm (e.g. round-robin) is
+// not handling this right, because if many backends all go from Ready to
+// Connecting, the overall situation is more like TransientFailure, not
+// Connecting).
+//
+// If the previous state was Idle, we don't do anything special with failure,
+// and simply forward the update. The init timer should be in progress, and
+// will handle failover if it times out. If the previous state was
+// TransientFailure, we do not forward, because the lower priority is in use.
+//
+// Caller must make sure priorityInUse is not higher than priority.
+//
+// Caller must hold mu.
+func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBalancer, priority int, oldState connectivity.State) {
+ // priorityInUse is lower than this priority, do nothing.
+ if b.priorityInUse > priority {
+ return
+ }
+
+ switch oldState {
+ case connectivity.Ready:
+ // Handling a transition from Ready to Connecting is the same as
+ // handling TransientFailure. There's no need to stop the init timer,
+ // because it should have been stopped when state turned Ready.
+ priorityNext := priority + 1
+ if priorityNext >= len(b.priorities) {
+ // Forward this update.
+ b.cc.UpdateState(child.state)
+ return
+ }
+ b.logger.Infof("Switching priority from %v to %v, because former became Connecting from Ready", priority, priorityNext)
+ nameNext := b.priorities[priorityNext]
+ childNext := b.children[nameNext]
+ b.switchToChild(childNext, priorityNext)
+ b.cc.UpdateState(childNext.state)
+ childNext.sendUpdate()
+ case connectivity.Idle:
+ b.cc.UpdateState(child.state)
+ default:
+ // Old state is Connecting, TransientFailure or Shutdown. Don't forward.
+ }
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go
new file mode 100644
index 0000000000..37f1c9a829
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/config.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package priority
+
+import (
+ "encoding/json"
+ "fmt"
+
+ internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+// Child is a child of priority balancer.
+type Child struct {
+ Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"`
+ IgnoreReresolutionRequests bool `json:"ignoreReresolutionRequests,omitempty"`
+}
+
+// LBConfig represents priority balancer's config.
+type LBConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // Children is a map from the child balancer names to their configs. Child
+ // names can be found in field Priorities.
+ Children map[string]*Child `json:"children,omitempty"`
+ // Priorities is a list of child balancer names. They are sorted from
+ // highest priority to lowest. The type/config for each child can be found
+ // in field Children, with the balancer name as the key.
+ Priorities []string `json:"priorities,omitempty"`
+}
+
+func parseConfig(c json.RawMessage) (*LBConfig, error) {
+ var cfg LBConfig
+ if err := json.Unmarshal(c, &cfg); err != nil {
+ return nil, err
+ }
+
+ prioritiesSet := make(map[string]bool)
+ for _, name := range cfg.Priorities {
+ if _, ok := cfg.Children[name]; !ok {
+ return nil, fmt.Errorf("LB policy name %q found in Priorities field (%v) but not in Children field (%+v)", name, cfg.Priorities, cfg.Children)
+ }
+ prioritiesSet[name] = true
+ }
+ for name := range cfg.Children {
+ if _, ok := prioritiesSet[name]; !ok {
+ return nil, fmt.Errorf("LB policy name %q found in Children field (%v) but not in Priorities field (%+v)", name, cfg.Children, cfg.Priorities)
+ }
+ }
+ return &cfg, nil
+}
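For a concrete picture of the schema parseConfig accepts: every name in priorities must appear in children, and vice versa. An in-package sketch (illustrative only; the child names, and the assumption that "round_robin" is a registered policy, are ours):

// Illustrative in-package sketch of a config that parseConfig accepts.
func exampleParseConfig() (*LBConfig, error) {
	input := json.RawMessage(`{
	  "children": {
	    "child-0": {"config": [{"round_robin": {}}]},
	    "child-1": {"config": [{"round_robin": {}}], "ignoreReresolutionRequests": true}
	  },
	  "priorities": ["child-0", "child-1"]
	}`)
	// Dropping "child-1" from either field (but not both) would make this fail.
	return parseConfig(input)
}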
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go
new file mode 100644
index 0000000000..9a9f477726
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/ignore_resolve_now.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package priority
+
+import (
+ "sync/atomic"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+type ignoreResolveNowBalancerBuilder struct {
+ balancer.Builder
+ ignoreResolveNow *uint32
+}
+
+// If `ignore` is true, all `ResolveNow()` calls from balancers built by this
+// builder will be ignored.
+//
+// `ignore` can be updated later by `updateIgnoreResolveNow`, and the update
+// will be propagated to all the old and new balancers built with this.
+func newIgnoreResolveNowBalancerBuilder(bb balancer.Builder, ignore bool) *ignoreResolveNowBalancerBuilder {
+ ret := &ignoreResolveNowBalancerBuilder{
+ Builder: bb,
+ ignoreResolveNow: new(uint32),
+ }
+ ret.updateIgnoreResolveNow(ignore)
+ return ret
+}
+
+func (irnbb *ignoreResolveNowBalancerBuilder) updateIgnoreResolveNow(b bool) {
+ if b {
+ atomic.StoreUint32(irnbb.ignoreResolveNow, 1)
+ return
+ }
+ atomic.StoreUint32(irnbb.ignoreResolveNow, 0)
+}
+
+func (irnbb *ignoreResolveNowBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ return irnbb.Builder.Build(&ignoreResolveNowClientConn{
+ ClientConn: cc,
+ ignoreResolveNow: irnbb.ignoreResolveNow,
+ }, opts)
+}
+
+type ignoreResolveNowClientConn struct {
+ balancer.ClientConn
+ ignoreResolveNow *uint32
+}
+
+func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) {
+ if atomic.LoadUint32(i.ignoreResolveNow) != 0 {
+ return
+ }
+ i.ClientConn.ResolveNow(o)
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go
new file mode 100644
index 0000000000..2fb8d2d204
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/logging.go
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package priority
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+const prefix = "[priority-lb %p] "
+
+var logger = grpclog.Component("xds")
+
+func prefixLogger(p *priorityBalancer) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go
similarity index 72%
rename from vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go
rename to vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go
index a6144cd661..45fbe76443 100644
--- a/vendor/google.golang.org/grpc/internal/credentials/syscallconn_appengine.go
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/utils.go
@@ -1,8 +1,6 @@
-// +build appengine
-
 /*
  *
- * Copyright 2018 gRPC authors.
+ * Copyright 2021 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,13 +16,16 @@
  *
  */
-package credentials
-
-import (
- "net"
-)
+package priority
-// WrapSyscallConn returns newConn on appengine.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go new file mode 100644 index 0000000000..5cb4aab3d9 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/config.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/serviceconfig" +) + +// LBConfig is the balancer config for ring_hash balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + MinRingSize uint64 `json:"minRingSize,omitempty"` + MaxRingSize uint64 `json:"maxRingSize,omitempty"` +} + +const ( + defaultMinSize = 1024 + defaultMaxSize = 8 * 1024 * 1024 // 8M +) + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + if cfg.MinRingSize == 0 { + cfg.MinRingSize = defaultMinSize + } + if cfg.MaxRingSize == 0 { + cfg.MaxRingSize = defaultMaxSize + } + if cfg.MinRingSize > cfg.MaxRingSize { + return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) + } + return &cfg, nil +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go new file mode 100644 index 0000000000..64a1d467f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/logging.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package ringhash
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+const prefix = "[ring-hash-lb %p] "
+
+var logger = grpclog.Component("xds")
+
+func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p))
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go
new file mode 100644
index 0000000000..dcea6d46e5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go
@@ -0,0 +1,154 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package ringhash
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+type picker struct {
+	ring   *ring
+	logger *grpclog.PrefixLogger
+}
+
+func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker {
+	return &picker{ring: ring, logger: logger}
+}
+
+// handleRICSResult is the return type of handleRICS. It's needed to wrap the
+// returned error from Pick() in a struct. Without it, the return values would
+// be `balancer.PickResult, error, bool`, and the linter complains because
+// error is not the last return value.
+type handleRICSResult struct {
+	pr  balancer.PickResult
+	err error
+}
+
+// handleRICS generates the pick result if the entry is in Ready, Idle,
+// Connecting or Shutdown. TransientFailure will be handled specifically after
+// this function returns.
+//
+// The boolean return value indicates whether the state is Ready, Idle,
+// Connecting or Shutdown. If it's true, the PickResult and error should be
+// returned from Pick() as is.
+func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) {
+	switch state := e.sc.effectiveState(); state {
+	case connectivity.Ready:
+		return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true
+	case connectivity.Idle:
+		// Trigger Connect() and queue the pick.
+		e.sc.queueConnect()
+		return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true
+	case connectivity.Connecting:
+		return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true
+	case connectivity.TransientFailure:
+		// Return ok==false, so TransientFailure will be handled afterwards.
+		return handleRICSResult{}, false
+	case connectivity.Shutdown:
+		// Shutdown can happen in a race where the old picker is called. A new
+		// picker should already be sent.
+		return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true
+	default:
+		// Should never reach this. All the connectivity states are already
+		// handled in the cases.
+		p.logger.Errorf("SubConn has undefined connectivity state: %v", state)
+		return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true
+	}
+}
+
+func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	e := p.ring.pick(getRequestHash(info.Ctx))
+	if hr, ok := p.handleRICS(e); ok {
+		return hr.pr, hr.err
+	}
+	// ok was false, the entry is in transient failure.
+	return p.handleTransientFailure(e)
+}
+
+func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, error) {
+	// Queue a connect on the first picked SubConn.
+	e.sc.queueConnect()
+
+	// Find the next entry in the ring, skipping duplicate SubConns.
+	e2 := nextSkippingDuplicates(p.ring, e)
+	if e2 == nil {
+		// There's no next entry available, fail the pick.
+		return balancer.PickResult{}, fmt.Errorf("the only SubConn is in Transient Failure")
+	}
+
+	// For the second SubConn, also check Ready/Idle/Connecting as if it's the
+	// first entry.
+	if hr, ok := p.handleRICS(e2); ok {
+		return hr.pr, hr.err
+	}
+
+	// The second SubConn is also in TransientFailure. Queue a connect on it.
+	e2.sc.queueConnect()
+
+	// If we get here, both the first and the second SubConn were in
+	// TransientFailure.
+	//
+	// Loop over all other SubConns:
+	// - If all SubConns so far are in TransientFailure, trigger Connect() on
+	// the TransientFailure SubConns, and keep going.
+	// - If there's one SubConn that's not in TransientFailure, keep checking
+	// the remaining SubConns (in case there's a Ready, which will be returned),
+	// but don't trigger Connect() on the other SubConns.
+	var firstNonFailedFound bool
+	for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) {
+		scState := ee.sc.effectiveState()
+		if scState == connectivity.Ready {
+			return balancer.PickResult{SubConn: ee.sc.sc}, nil
+		}
+		if firstNonFailedFound {
+			continue
+		}
+		if scState == connectivity.TransientFailure {
+			// This will queue a connect.
+			ee.sc.queueConnect()
+			continue
+		}
+		// This is a SubConn in a non-failure state. We continue to check the
+		// other SubConns, but remember that there was a non-failed SubConn
+		// seen. After this, Pick() will never trigger any SubConn to Connect().
+		firstNonFailedFound = true
+		if scState == connectivity.Idle {
+			// This is the first non-failed SubConn, and it is in a real Idle
+			// state. Trigger it to Connect().
+			ee.sc.queueConnect()
+		}
+	}
+	return balancer.PickResult{}, fmt.Errorf("no connection is Ready")
+}
+
+func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry {
+	for next := ring.next(entry); next != entry; next = ring.next(next) {
+		if next.sc != entry.sc {
+			return next
+		}
+	}
+	// There's no qualifying next entry.
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go
new file mode 100644
index 0000000000..68e844cfb4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "fmt" + "math" + "sort" + "strconv" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/resolver" +) + +type ring struct { + items []*ringEntry +} + +type subConnWithWeight struct { + sc *subConn + weight float64 +} + +type ringEntry struct { + idx int + hash uint64 + sc *subConn +} + +// newRing creates a ring from the subConns. The ring size is limited by the +// passed in max/min. +// +// ring entries will be created for each subConn, and subConn with high weight +// (specified by the address) may have multiple entries. +// +// For example, for subConns with weights {a:3, b:3, c:4}, a generated ring of +// size 10 could be: +// - {idx:0 hash:3689675255460411075 b} +// - {idx:1 hash:4262906501694543955 c} +// - {idx:2 hash:5712155492001633497 c} +// - {idx:3 hash:8050519350657643659 b} +// - {idx:4 hash:8723022065838381142 b} +// - {idx:5 hash:11532782514799973195 a} +// - {idx:6 hash:13157034721563383607 c} +// - {idx:7 hash:14468677667651225770 c} +// - {idx:8 hash:17336016884672388720 a} +// - {idx:9 hash:18151002094784932496 a} +// +// To pick from a ring, a binary search will be done for the given target hash, +// and first item with hash >= given hash will be returned. +func newRing(subConns map[resolver.Address]*subConn, minRingSize, maxRingSize uint64) (*ring, error) { + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 + normalizedWeights, minWeight, err := normalizeWeights(subConns) + if err != nil { + return nil, err + } + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. + + // Scale up the size of the ring such that the least-weighted host gets a + // whole number of hashes on the ring. + // + // Note that size is limited by the input max/min. + scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) + ringSize := math.Ceil(scale) + items := make([]*ringEntry, 0, int(ringSize)) + + // For each entry, scale*weight nodes are generated in the ring. + // + // Not all of these are whole numbers. E.g. for weights {a:3,b:3,c:4}, if + // ring size is 7, scale is 6.66. The numbers of nodes will be + // {a,a,b,b,c,c,c}. + // + // A hash is generated for each item, and later the results will be sorted + // based on the hash. + var ( + idx int + targetIdx float64 + ) + for _, scw := range normalizedWeights { + targetIdx += scale * scw.weight + for float64(idx) < targetIdx { + h := xxhash.Sum64String(scw.sc.addr + strconv.Itoa(len(items))) + items = append(items, &ringEntry{idx: idx, hash: h, sc: scw.sc}) + idx++ + } + } + + // Sort items based on hash, to prepare for binary search. + sort.Slice(items, func(i, j int) bool { return items[i].hash < items[j].hash }) + for i, ii := range items { + ii.idx = i + } + return &ring{items: items}, nil +} + +// normalizeWeights divides all the weights by the sum, so that the total weight +// is 1. 
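+// For example, as with the ring example above, weights {a:3, b:3, c:4}
+// normalize to {a:0.3, b:0.3, c:0.4}, and the returned min is 0.3.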
+func normalizeWeights(subConns map[resolver.Address]*subConn) (_ []subConnWithWeight, min float64, _ error) { + if len(subConns) == 0 { + return nil, 0, fmt.Errorf("number of subconns is 0") + } + var weightSum uint32 + for a := range subConns { + // The address weight was moved from attributes to the Metadata field. + // This is necessary (all the attributes need to be stripped) for the + // balancer to detect identical {address+weight} combination. + weightSum += a.Metadata.(uint32) + } + if weightSum == 0 { + return nil, 0, fmt.Errorf("total weight of all subconns is 0") + } + weightSumF := float64(weightSum) + ret := make([]subConnWithWeight, 0, len(subConns)) + min = math.MaxFloat64 + for a, sc := range subConns { + nw := float64(a.Metadata.(uint32)) / weightSumF + ret = append(ret, subConnWithWeight{sc: sc, weight: nw}) + if nw < min { + min = nw + } + } + // Sort the addresses to return consistent results. + // + // Note: this might not be necessary, but this makes sure the ring is + // consistent as long as the addresses are the same, for example, in cases + // where an address is added and then removed, the RPCs will still pick the + // same old SubConn. + sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) + return ret, min, nil +} + +// pick does a binary search. It returns the item with smallest index i that +// r.items[i].hash >= h. +func (r *ring) pick(h uint64) *ringEntry { + i := sort.Search(len(r.items), func(i int) bool { return r.items[i].hash >= h }) + if i == len(r.items) { + // If not found, and h is greater than the largest hash, return the + // first item. + i = 0 + } + return r.items[i] +} + +// next returns the next entry. +func (r *ring) next(e *ringEntry) *ringEntry { + return r.items[(e.idx+1)%len(r.items)] +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go new file mode 100644 index 0000000000..f8a47f165b --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -0,0 +1,434 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash implements the ringhash balancer. +package ringhash + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the ring_hash balancer. 
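+//
+// As an illustration (values are made up, not part of the upstream comment),
+// a service config selecting this balancer with the config parsed by
+// parseConfig in config.go could look like:
+//
+//	{"loadBalancingConfig": [{"ring_hash_experimental": {"minRingSize": 1024, "maxRingSize": 4096}}]}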
+const Name = "ring_hash_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &ringhashBalancer{ + cc: cc, + subConns: make(map[resolver.Address]*subConn), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type subConn struct { + addr string + sc balancer.SubConn + + mu sync.RWMutex + // This is the actual state of this SubConn (as updated by the ClientConn). + // The effective state can be different, see comment of attemptedToConnect. + state connectivity.State + // failing is whether this SubConn is in a failing state. A subConn is + // considered to be in a failing state if it was previously in + // TransientFailure. + // + // This affects the effective connectivity state of this SubConn, e.g. + // - if the actual state is Idle or Connecting, but this SubConn is failing, + // the effective state is TransientFailure. + // + // This is used in pick(). E.g. if a subConn is Idle, but has failing as + // true, pick() will + // - consider this SubConn as TransientFailure, and check the state of the + // next SubConn. + // - trigger Connect() (note that normally a SubConn in real + // TransientFailure cannot Connect()) + // + // A subConn starts in non-failing (failing is false). A transition to + // TransientFailure sets failing to true (and it stays true). A transition + // to Ready sets failing to false. + failing bool + // connectQueued is true if a Connect() was queued for this SubConn while + // it's not in Idle (most likely was in TransientFailure). A Connect() will + // be triggered on this SubConn when it turns Idle. + // + // When connectivity state is updated to Idle for this SubConn, if + // connectQueued is true, Connect() will be called on the SubConn. + connectQueued bool +} + +// setState updates the state of this SubConn. +// +// It also handles the queued Connect(). If the new state is Idle, and a +// Connect() was queued, this SubConn will be triggered to Connect(). +func (sc *subConn) setState(s connectivity.State) { + sc.mu.Lock() + defer sc.mu.Unlock() + switch s { + case connectivity.Idle: + // Trigger Connect() if new state is Idle, and there is a queued connect. + if sc.connectQueued { + sc.connectQueued = false + sc.sc.Connect() + } + case connectivity.Connecting: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + case connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + // Set to a non-failing state. + sc.failing = false + case connectivity.TransientFailure: + // Set to a failing state. + sc.failing = true + } + sc.state = s +} + +// effectiveState returns the effective state of this SubConn. It can be +// different from the actual state, e.g. Idle while the subConn is failing is +// considered TransientFailure. Read comment of field failing for other cases. 
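+//
+// For example, per the rules above: {state: Idle, failing: true} is reported
+// as TransientFailure, while {state: Idle, failing: false} is reported as
+// Idle. Ready is always reported as Ready, since entering Ready clears
+// failing in setState.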
+func (sc *subConn) effectiveState() connectivity.State {
+	sc.mu.RLock()
+	defer sc.mu.RUnlock()
+	if sc.failing && (sc.state == connectivity.Idle || sc.state == connectivity.Connecting) {
+		return connectivity.TransientFailure
+	}
+	return sc.state
+}
+
+// queueConnect sets a boolean so that when the SubConn state changes to Idle,
+// its Connect() will be triggered. If the SubConn state is already Idle, it
+// will just call Connect().
+func (sc *subConn) queueConnect() {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+	if sc.state == connectivity.Idle {
+		sc.sc.Connect()
+		return
+	}
+	// Queue this connect, and when this SubConn switches back to Idle (happens
+	// after backoff in TransientFailure), it will Connect().
+	sc.connectQueued = true
+}
+
+type ringhashBalancer struct {
+	cc     balancer.ClientConn
+	logger *grpclog.PrefixLogger
+
+	config *LBConfig
+
+	subConns map[resolver.Address]*subConn // `attributes` is stripped from the keys of this map (the addresses)
+	scStates map[balancer.SubConn]*subConn
+
+	// ring is always in sync with subConns. When subConns change, a new ring
+	// is generated. Note that address weight updates (weights are part of the
+	// keys of the subConns map) also regenerate the ring.
+	ring    *ring
+	picker  balancer.Picker
+	csEvltr *connectivityStateEvaluator
+	state   connectivity.State
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
+}
+
+// updateAddresses creates new SubConns and removes SubConns, based on the
+// address update.
+//
+// The return value is whether the new address list is different from the
+// previous. True if
+// - an address was added
+// - an address was removed
+// - an address's weight was updated
+//
+// Note that this function doesn't trigger SubConn connecting, so all the new
+// SubConn states are Idle.
+func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool {
+	var addrsUpdated bool
+	// addrsSet is the set converted from addrs; it's used for quick lookup of
+	// an address.
+	//
+	// Addresses in this map all have attributes stripped, but Metadata set to
+	// the weight, so that weight changes can be detected.
+	//
+	// TODO: this won't be necessary if there are ways to compare address
+	// attributes.
+	addrsSet := make(map[resolver.Address]struct{})
+	for _, a := range addrs {
+		aNoAttrs := a
+		// Strip attributes but set Metadata to the weight.
+		aNoAttrs.Attributes = nil
+		w := weightedroundrobin.GetAddrInfo(a).Weight
+		if w == 0 {
+			// If weight is not set, use 1.
+			w = 1
+		}
+		aNoAttrs.Metadata = w
+		addrsSet[aNoAttrs] = struct{}{}
+		if scInfo, ok := b.subConns[aNoAttrs]; !ok {
+			// When creating a SubConn, the original address with attributes
+			// is passed through, so that connection configurations in
+			// attributes (like creds) will be used.
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: true})
+			if err != nil {
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			scs := &subConn{addr: a.Addr, sc: sc}
+			scs.setState(connectivity.Idle)
+			b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle)
+			b.subConns[aNoAttrs] = scs
+			b.scStates[sc] = scs
+			addrsUpdated = true
+		} else {
+			// Always update the subconn's address in case the attributes
+			// changed. The SubConn does a reflect.DeepEqual of the new and old
+			// addresses. So this is a noop if the current address is the same
+			// as the old one (including attributes).
+			b.subConns[aNoAttrs] = scInfo
+			b.cc.UpdateAddresses(scInfo.sc, []resolver.Address{a})
+		}
+	}
+	for a, scInfo := range b.subConns {
+		// a was removed by the resolver.
+		if _, ok := addrsSet[a]; !ok {
+			b.cc.RemoveSubConn(scInfo.sc)
+			delete(b.subConns, a)
+			addrsUpdated = true
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in UpdateSubConnState.
+		}
+	}
+	return addrsUpdated
+}
+
+func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig))
+	if b.config == nil {
+		newConfig, ok := s.BalancerConfig.(*LBConfig)
+		if !ok {
+			return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig)
+		}
+		b.config = newConfig
+	}
+
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
+	if b.updateAddresses(s.ResolverState.Addresses) {
+		// If addresses were updated, no matter whether it resulted in SubConn
+		// creation/deletion, or just a weight update, we will need to
+		// regenerate the ring.
+		var err error
+		b.ring, err = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize)
+		if err != nil {
+			panic(err)
+		}
+		b.regeneratePicker()
+		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+	}
+
+	// If the resolver state contains no addresses, return an error so
+	// ClientConn will trigger re-resolve. Also record this as a resolver
+	// error, so when the overall state turns transient failure, the error
+	// message will have the zero-address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	return nil
+}
+
+func (b *ringhashBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if len(b.subConns) == 0 {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
+}
+
+// UpdateSubConnState updates the per-SubConn state stored in the ring, and also
+// the aggregated state.
+//
+// It triggers an update to cc when:
+// - the new state is TransientFailure, to update the error message
+//   - it's possible that this is a noop, but sending an extra update is easier
+//     than comparing errors
+// - the aggregated state is changed
+//   - the same picker will be sent again, but this update may trigger a re-pick
+//     for some RPCs.
+func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	s := state.ConnectivityState
+	b.logger.Infof("handle SubConn state change: %p, %v", sc, s)
+	scs, ok := b.scStates[sc]
+	if !ok {
+		b.logger.Infof("got state changes for an unknown SubConn: %p, %v", sc, s)
+		return
+	}
+	oldSCState := scs.effectiveState()
+	scs.setState(s)
+	newSCState := scs.effectiveState()
+
+	var sendUpdate bool
+	oldBalancerState := b.state
+	b.state = b.csEvltr.recordTransition(oldSCState, newSCState)
+	if oldBalancerState != b.state {
+		sendUpdate = true
+	}
+
+	switch s {
+	case connectivity.Idle:
+		// When the overall state is TransientFailure, this will never get
+		// picks if there's a lower priority. Need to keep the SubConns
+		// connecting so there's a chance they will recover.
+		if b.state == connectivity.TransientFailure {
+			scs.queueConnect()
+		}
+		// No need to send an update. No queued RPC can be unblocked. If the
+		// overall state changed because of this, sendUpdate is already true.
+	case connectivity.Connecting:
+		// No need to send an update. No queued RPC can be unblocked. If the
+		// overall state changed because of this, sendUpdate is already true.
+	case connectivity.Ready:
+		// Resend the picker; there's no need to regenerate the picker because
+		// the ring didn't change.
+		sendUpdate = true
+	case connectivity.TransientFailure:
+		// Save error to be reported via picker.
+		b.connErr = state.ConnectionError
+		// Regenerate picker to update the error message.
+		b.regeneratePicker()
+		sendUpdate = true
+	case connectivity.Shutdown:
+		// When an address was removed by the resolver, b called RemoveSubConn
+		// but kept the sc's state in scStates. Remove state for this sc here.
+		delete(b.scStates, sc)
+	}
+
+	if sendUpdate {
+		b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+	}
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *ringhashBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+	if b.connErr == nil {
+		return fmt.Errorf("last resolver error: %v", b.resolverErr)
+	}
+	if b.resolverErr == nil {
+		return fmt.Errorf("last connection error: %v", b.connErr)
+	}
+	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
+func (b *ringhashBalancer) regeneratePicker() {
+	if b.state == connectivity.TransientFailure {
+		b.picker = base.NewErrPicker(b.mergeErrors())
+		return
+	}
+	b.picker = newPicker(b.ring, b.logger)
+}
+
+func (b *ringhashBalancer) Close() {}
+
+// connectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type connectivityStateEvaluator struct {
+	nums [5]uint64
+}
+
+// recordTransition records the state change of a subConn and, based on that,
+// evaluates what the aggregated state should be.
+//
+// - If there is at least one subchannel in READY state, report READY.
+// - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE.
+// - If there is at least one subchannel in CONNECTING state, report CONNECTING.
+// - If there is at least one subchannel in Idle state, report Idle.
+// - Otherwise, report TRANSIENT_FAILURE.
+//
+// Note that with one subchannel in Connecting and two in TransientFailure,
+// the overall state is transient failure. This is because the second
+// transient failure is a fallback of the first failing SubConn, and we want
+// to report transient failure to failover to the lower priority.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
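+		// Note: when idx == 0 the expression above wraps around to
+		// math.MaxUint64, which under uint64 modular arithmetic acts as -1,
+		// so the addition below effectively decrements the old state's
+		// counter.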
+ cse.nums[state] += updateVal + } + + if cse.nums[connectivity.Ready] > 0 { + return connectivity.Ready + } + if cse.nums[connectivity.TransientFailure] > 1 { + return connectivity.TransientFailure + } + if cse.nums[connectivity.Connecting] > 0 { + return connectivity.Connecting + } + if cse.nums[connectivity.Idle] > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go new file mode 100644 index 0000000000..92bb3ae5b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/util.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import "context" + +type clusterKey struct{} + +func getRequestHash(ctx context.Context) uint64 { + requestHash, _ := ctx.Value(clusterKey{}).(uint64) + return requestHash +} + +// GetRequestHashForTesting returns the request hash in the context; to be used +// for testing only. +func GetRequestHashForTesting(ctx context.Context) uint64 { + return getRequestHash(ctx) +} + +// SetRequestHash adds the request hash to the context for use in Ring Hash Load +// Balancing. +func SetRequestHash(ctx context.Context, requestHash uint64) context.Context { + return context.WithValue(ctx, clusterKey{}, requestHash) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go index 6c36e2a69c..7e1d106e9f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -200,7 +200,9 @@ func (wbsa *Aggregator) BuildAndUpdate() { func (wbsa *Aggregator) build() balancer.State { wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) m := wbsa.idToPickerState - var readyN, connectingN int + // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated + // state. 
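+	// Aggregation precedence, per the switch statements below: any Ready
+	// child makes the aggregate Ready; otherwise any Connecting child makes
+	// it Connecting; otherwise any Idle child makes it Idle; with none of
+	// those, the aggregate is TransientFailure.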
+ var readyN, connectingN, idleN int readyPickerWithWeights := make([]weightedPickerState, 0, len(m)) for _, ps := range m { switch ps.stateToAggregate { @@ -209,6 +211,8 @@ func (wbsa *Aggregator) build() balancer.State { readyPickerWithWeights = append(readyPickerWithWeights, *ps) case connectivity.Connecting: connectingN++ + case connectivity.Idle: + idleN++ } } var aggregatedState connectivity.State @@ -217,6 +221,8 @@ func (wbsa *Aggregator) build() balancer.State { aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting + case idleN > 0: + aggregatedState = connectivity.Idle default: aggregatedState = connectivity.TransientFailure } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedtarget.go index a210816332..f05e0aca19 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedtarget.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -36,20 +37,20 @@ import ( // Name is the name of the weighted_target balancer. const Name = "weighted_target_experimental" -// newRandomWRR is the WRR constructor used to pick sub-pickers from +// NewRandomWRR is the WRR constructor used to pick sub-pickers from // sub-balancers. It's to be modified in tests. -var newRandomWRR = wrr.NewRandom +var NewRandomWRR = wrr.NewRandom func init() { - balancer.Register(&weightedTargetBB{}) + balancer.Register(bb{}) } -type weightedTargetBB struct{} +type bb struct{} -func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &weightedTargetBalancer{} b.logger = prefixLogger(b) - b.stateAggregator = weightedaggregator.New(cc, b.logger, newRandomWRR) + b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) b.stateAggregator.Start() b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, nil, b.logger) b.bg.Start() @@ -57,11 +58,11 @@ func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOp return b } -func (wt *weightedTargetBB) Name() string { +func (bb) Name() string { return Name } -func (wt *weightedTargetBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } @@ -80,9 +81,10 @@ type weightedTargetBalancer struct { } // UpdateClientConnState takes the new targets in balancer group, -// creates/deletes sub-balancers and sends them update. Addresses are split into +// creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. 
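+// (Illustration, not upstream text: with the internal/hierarchy package, an
+// address whose hierarchy path attribute is ["cluster_a", ...] is grouped
+// under the "cluster_a" sub-balancer, with that leading element removed
+// before the update is forwarded.)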
-func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { +func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) @@ -92,10 +94,10 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat var rebuildStateAndPicker bool // Remove sub-pickers and sub-balancers that are not in the new config. - for name := range w.targets { + for name := range b.targets { if _, ok := newConfig.Targets[name]; !ok { - w.stateAggregator.Remove(name) - w.bg.Remove(name) + b.stateAggregator.Remove(name) + b.bg.Remove(name) // Trigger a state/picker update, because we don't want `ClientConn` // to pick this sub-balancer anymore. rebuildStateAndPicker = true @@ -108,39 +110,39 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat // // For all sub-balancers, forward the address/balancer config update. for name, newT := range newConfig.Targets { - oldT, ok := w.targets[name] + oldT, ok := b.targets[name] if !ok { // If this is a new sub-balancer, add weights to the picker map. - w.stateAggregator.Add(name, newT.Weight) + b.stateAggregator.Add(name, newT.Weight) // Then add to the balancer group. - w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Not trigger a state/picker update. Wait for the new sub-balancer // to send its updates. } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { // If the child policy name is differet, remove from balancer group // and re-add. - w.stateAggregator.Remove(name) - w.bg.Remove(name) - w.stateAggregator.Add(name, newT.Weight) - w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + b.stateAggregator.Remove(name) + b.bg.Remove(name) + b.stateAggregator.Add(name, newT.Weight) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Trigger a state/picker update, because we don't want `ClientConn` // to pick this sub-balancer anymore. rebuildStateAndPicker = true } else if newT.Weight != oldT.Weight { // If this is an existing sub-balancer, update weight if necessary. - w.stateAggregator.UpdateWeight(name, newT.Weight) + b.stateAggregator.UpdateWeight(name, newT.Weight) // Trigger a state/picker update, because we don't want `ClientConn` // should do picks with the new weights now. rebuildStateAndPicker = true } // Forwards all the update: - // - Addresses are from the map after splitting with hierarchy path, + // - addresses are from the map after splitting with hierarchy path, // - Top level service config and attributes are the same, // - Balancer config comes from the targets map. // // TODO: handle error? How to aggregate errors and return? 
- _ = w.bg.UpdateClientConnState(name, balancer.ClientConnState{ + _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, @@ -150,23 +152,27 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat }) } - w.targets = newConfig.Targets + b.targets = newConfig.Targets if rebuildStateAndPicker { - w.stateAggregator.BuildAndUpdate() + b.stateAggregator.BuildAndUpdate() } return nil } -func (w *weightedTargetBalancer) ResolverError(err error) { - w.bg.ResolverError(err) +func (b *weightedTargetBalancer) ResolverError(err error) { + b.bg.ResolverError(err) } -func (w *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - w.bg.UpdateSubConnState(sc, state) +func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) } -func (w *weightedTargetBalancer) Close() { - w.stateAggregator.Stop() - w.bg.Close() +func (b *weightedTargetBalancer) Close() { + b.stateAggregator.Stop() + b.bg.Close() +} + +func (b *weightedTargetBalancer) ExitIdle() { + b.bg.ExitIdle() } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index ee2ed9fd49..725b50a76a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/httpfilter" @@ -63,9 +62,7 @@ var statusMap = map[int]codes.Code{ } func init() { - if env.FaultInjectionSupport { - httpfilter.Register(builder{}) - } + httpfilter.Register(builder{}) } type builder struct { @@ -104,6 +101,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return parseConfig(override) } +func (builder) IsTerminal() bool { + return false +} + var _ httpfilter.ClientInterceptorBuilder = builder{} func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go index 1f5f005e9b..b4399f9fae 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/httpfilter.go @@ -50,6 +50,9 @@ type Filter interface { // not accept a custom type. The resulting FilterConfig will later be // passed to Build. ParseFilterConfigOverride(proto.Message) (FilterConfig, error) + // IsTerminal returns whether this Filter is terminal or not (i.e. it must + // be last filter in the filter chain). + IsTerminal() bool } // ClientInterceptorBuilder constructs a Client Interceptor. If this type is @@ -91,6 +94,11 @@ func Register(b Filter) { } } +// UnregisterForTesting unregisters the HTTP Filter for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} + // Get returns the HTTPFilter registered with typeURL. // // If no filter is register with typeURL, nil will be returned. 
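Editor's note: the IsTerminal additions in the hunks above and below let the xDS code reject filter chains in which a terminal filter (such as the router) is not the last entry. The validation itself is not part of this diff; the sketch below is a hypothetical illustration of how such a check could be written against the new interface (validateFilterChain is an invented name, and the httpfilter package is internal to grpc-go, so this only compiles inside the module):

	package httpfilterexample

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc/xds/internal/httpfilter"
	)

	// validateFilterChain enforces the IsTerminal contract: only the last
	// filter in the chain may, and must, be terminal.
	func validateFilterChain(filters []httpfilter.Filter) error {
		if len(filters) == 0 {
			return errors.New("filter chain is empty")
		}
		for i, f := range filters[:len(filters)-1] {
			if f.IsTerminal() {
				return fmt.Errorf("terminal filter at index %d must be last in the chain", i)
			}
		}
		if !filters[len(filters)-1].IsTerminal() {
			return errors.New("filter chain must end with a terminal filter, e.g. the router")
		}
		return nil
	}
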
diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go index b0f9d9d9a1..1ac6518170 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go @@ -73,6 +73,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return config{}, nil } +func (builder) IsTerminal() bool { + return true +} + var ( _ httpfilter.ClientInterceptorBuilder = builder{} _ httpfilter.ServerInterceptorBuilder = builder{} diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index e4284ee02e..0cccd38241 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -22,6 +22,8 @@ package internal import ( "encoding/json" "fmt" + + "google.golang.org/grpc/resolver" ) // LocalityID is xds.Locality without XXX fields, so it can be used as map @@ -53,3 +55,19 @@ func LocalityIDFromString(s string) (ret LocalityID, _ error) { } return ret, nil } + +type localityKeyType string + +const localityKey = localityKeyType("grpc.xds.internal.address.locality") + +// GetLocalityID returns the locality ID of addr. +func GetLocalityID(addr resolver.Address) LocalityID { + path, _ := addr.Attributes.Value(localityKey).(LocalityID) + return path +} + +// SetLocalityID sets locality ID in addr to l. +func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { + addr.Attributes = addr.Attributes.WithValues(localityKey, l) + return addr +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher.go b/vendor/google.golang.org/grpc/xds/internal/resolver/matcher.go deleted file mode 100644 index 06456a5855..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package resolver - -import ( - "fmt" - "strings" - - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcutil" - iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/metadata" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { - var pathMatcher pathMatcherInterface - switch { - case r.Regex != nil: - pathMatcher = newPathRegexMatcher(r.Regex) - case r.Path != nil: - pathMatcher = newPathExactMatcher(*r.Path, r.CaseInsensitive) - case r.Prefix != nil: - pathMatcher = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) - default: - return nil, fmt.Errorf("illegal route: missing path_matcher") - } - - var headerMatchers []headerMatcherInterface - for _, h := range r.Headers { - var matcherT headerMatcherInterface - switch { - case h.ExactMatch != nil && *h.ExactMatch != "": - matcherT = newHeaderExactMatcher(h.Name, *h.ExactMatch) - case h.RegexMatch != nil: - matcherT = newHeaderRegexMatcher(h.Name, h.RegexMatch) - case h.PrefixMatch != nil && *h.PrefixMatch != "": - matcherT = newHeaderPrefixMatcher(h.Name, *h.PrefixMatch) - case h.SuffixMatch != nil && *h.SuffixMatch != "": - matcherT = newHeaderSuffixMatcher(h.Name, *h.SuffixMatch) - case h.RangeMatch != nil: - matcherT = newHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) - case h.PresentMatch != nil: - matcherT = newHeaderPresentMatcher(h.Name, *h.PresentMatch) - default: - return nil, fmt.Errorf("illegal route: missing header_match_specifier") - } - if h.InvertMatch != nil && *h.InvertMatch { - matcherT = newInvertMatcher(matcherT) - } - headerMatchers = append(headerMatchers, matcherT) - } - - var fractionMatcher *fractionMatcher - if r.Fraction != nil { - fractionMatcher = newFractionMatcher(*r.Fraction) - } - return newCompositeMatcher(pathMatcher, headerMatchers, fractionMatcher), nil -} - -// compositeMatcher.match returns true if all matchers return true. -type compositeMatcher struct { - pm pathMatcherInterface - hms []headerMatcherInterface - fm *fractionMatcher -} - -func newCompositeMatcher(pm pathMatcherInterface, hms []headerMatcherInterface, fm *fractionMatcher) *compositeMatcher { - return &compositeMatcher{pm: pm, hms: hms, fm: fm} -} - -func (a *compositeMatcher) match(info iresolver.RPCInfo) bool { - if a.pm != nil && !a.pm.match(info.Method) { - return false - } - - // Call headerMatchers even if md is nil, because routes may match - // non-presence of some headers. - var md metadata.MD - if info.Context != nil { - md, _ = metadata.FromOutgoingContext(info.Context) - if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { - md = metadata.Join(md, extraMD) - // Remove all binary headers. They are hard to match with. May need - // to add back if asked by users. - for k := range md { - if strings.HasSuffix(k, "-bin") { - delete(md, k) - } - } - } - } - for _, m := range a.hms { - if !m.match(md) { - return false - } - } - - if a.fm != nil && !a.fm.match() { - return false - } - return true -} - -func (a *compositeMatcher) String() string { - var ret string - if a.pm != nil { - ret += a.pm.String() - } - for _, m := range a.hms { - ret += m.String() - } - if a.fm != nil { - ret += a.fm.String() - } - return ret -} - -type fractionMatcher struct { - fraction int64 // real fraction is fraction/1,000,000. 
-} - -func newFractionMatcher(fraction uint32) *fractionMatcher { - return &fractionMatcher{fraction: int64(fraction)} -} - -var grpcrandInt63n = grpcrand.Int63n - -func (fm *fractionMatcher) match() bool { - t := grpcrandInt63n(1000000) - return t <= fm.fraction -} - -func (fm *fractionMatcher) String() string { - return fmt.Sprintf("fraction:%v", fm.fraction) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher_header.go b/vendor/google.golang.org/grpc/xds/internal/resolver/matcher_header.go deleted file mode 100644 index 05a92788d7..0000000000 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher_header.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package resolver - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "google.golang.org/grpc/metadata" -) - -type headerMatcherInterface interface { - match(metadata.MD) bool - String() string -} - -// mdValuesFromOutgoingCtx retrieves metadata from context. If there are -// multiple values, the values are concatenated with "," (comma and no space). -// -// All header matchers only match against the comma-concatenated string. -func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { - vs, ok := md[key] - if !ok { - return "", false - } - return strings.Join(vs, ","), true -} - -type headerExactMatcher struct { - key string - exact string -} - -func newHeaderExactMatcher(key, exact string) *headerExactMatcher { - return &headerExactMatcher{key: key, exact: exact} -} - -func (hem *headerExactMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hem.key) - if !ok { - return false - } - return v == hem.exact -} - -func (hem *headerExactMatcher) String() string { - return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) -} - -type headerRegexMatcher struct { - key string - re *regexp.Regexp -} - -func newHeaderRegexMatcher(key string, re *regexp.Regexp) *headerRegexMatcher { - return &headerRegexMatcher{key: key, re: re} -} - -func (hrm *headerRegexMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - return hrm.re.MatchString(v) -} - -func (hrm *headerRegexMatcher) String() string { - return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) -} - -type headerRangeMatcher struct { - key string - start, end int64 // represents [start, end). 
-} - -func newHeaderRangeMatcher(key string, start, end int64) *headerRangeMatcher { - return &headerRangeMatcher{key: key, start: start, end: end} -} - -func (hrm *headerRangeMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { - return true - } - return false -} - -func (hrm *headerRangeMatcher) String() string { - return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) -} - -type headerPresentMatcher struct { - key string - present bool -} - -func newHeaderPresentMatcher(key string, present bool) *headerPresentMatcher { - return &headerPresentMatcher{key: key, present: present} -} - -func (hpm *headerPresentMatcher) match(md metadata.MD) bool { - vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) - present := ok && len(vs) > 0 - return present == hpm.present -} - -func (hpm *headerPresentMatcher) String() string { - return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) -} - -type headerPrefixMatcher struct { - key string - prefix string -} - -func newHeaderPrefixMatcher(key string, prefix string) *headerPrefixMatcher { - return &headerPrefixMatcher{key: key, prefix: prefix} -} - -func (hpm *headerPrefixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hpm.key) - if !ok { - return false - } - return strings.HasPrefix(v, hpm.prefix) -} - -func (hpm *headerPrefixMatcher) String() string { - return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) -} - -type headerSuffixMatcher struct { - key string - suffix string -} - -func newHeaderSuffixMatcher(key string, suffix string) *headerSuffixMatcher { - return &headerSuffixMatcher{key: key, suffix: suffix} -} - -func (hsm *headerSuffixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hsm.key) - if !ok { - return false - } - return strings.HasSuffix(v, hsm.suffix) -} - -func (hsm *headerSuffixMatcher) String() string { - return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) -} - -type invertMatcher struct { - m headerMatcherInterface -} - -func newInvertMatcher(m headerMatcherInterface) *invertMatcher { - return &invertMatcher{m: m} -} - -func (i *invertMatcher) match(md metadata.MD) bool { - return !i.m.match(md) -} - -func (i *invertMatcher) String() string { - return fmt.Sprintf("invert{%s}", i.m) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index ef7c37128c..ddf699f938 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -22,18 +22,25 @@ import ( "context" "encoding/json" "fmt" + "math/bits" + "strings" "sync/atomic" "time" + xxhash "github.com/cespare/xxhash/v2" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" + 
"google.golang.org/grpc/xds/internal/xdsclient" ) const ( @@ -76,7 +83,7 @@ func (r *xdsResolver) pruneActiveClusters() { // serviceConfigJSON produces a service config in JSON format representing all // the clusters referenced in activeClusters. This includes clusters with zero // references, so they must be pruned first. -func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { +func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { // Generate children (all entries in activeClusters). children := make(map[string]xdsChildConfig) for cluster := range activeClusters { @@ -93,14 +100,16 @@ func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { bs, err := json.Marshal(sc) if err != nil { - return "", fmt.Errorf("failed to marshal json: %v", err) + return nil, fmt.Errorf("failed to marshal json: %v", err) } - return string(bs), nil + return bs, nil } type virtualHost struct { // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + // retry policy present in virtual host + retryConfig *xdsclient.RetryConfig } // routeCluster holds information about a cluster as referenced by a route. @@ -111,11 +120,13 @@ type routeCluster struct { } type route struct { - m *compositeMatcher // converted from route matchers - clusters wrr.WRR // holds *routeCluster entries + m *xdsclient.CompositeMatcher // converted from route matchers + clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + retryConfig *xdsclient.RetryConfig + hashPolicies []*xdsclient.HashPolicy } func (r route) String() string { @@ -139,7 +150,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP var rt *route // Loop through routes in order and select first match. for _, r := range cs.routes { - if r.m.match(rpcInfo) { + if r.m.Match(rpcInfo) { rt = &r break } @@ -161,9 +172,15 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return nil, err } + lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) + // Request Hashes are only applicable for a Ring Hash LB. + if env.RingHashSupport { + lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) + } + config := &iresolver.RPCConfig{ - // Communicate to the LB policy the chosen cluster. - Context: clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name), + // Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy. + Context: lbCtx, OnCommitted: func() { // When the RPC is committed, the cluster is no longer required. // Decrease its ref. 
@@ -179,13 +196,83 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP Interceptor: interceptor, } - if env.TimeoutSupport && rt.maxStreamDuration != 0 { + if rt.maxStreamDuration != 0 { config.MethodConfig.Timeout = &rt.maxStreamDuration } + if rt.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig) + } else if cs.virtualHost.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig) + } return config, nil } +func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPolicy { + return &serviceconfig.RetryPolicy{ + MaxAttempts: int(config.NumRetries) + 1, + InitialBackoff: config.RetryBackoff.BaseInterval, + MaxBackoff: config.RetryBackoff.MaxInterval, + BackoffMultiplier: 2, + RetryableStatusCodes: config.RetryOn, + } +} + +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 { + var hash uint64 + var generatedHash bool + for _, policy := range hashPolicies { + var policyHash uint64 + var generatedPolicyHash bool + switch policy.HashPolicyType { + case xdsclient.HashPolicyTypeHeader: + md, ok := metadata.FromOutgoingContext(rpcInfo.Context) + if !ok { + continue + } + values := md.Get(policy.HeaderName) + // If the header isn't present, no-op. + if len(values) == 0 { + continue + } + joinedValues := strings.Join(values, ",") + if policy.Regex != nil { + joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution) + } + policyHash = xxhash.Sum64String(joinedValues) + generatedHash = true + generatedPolicyHash = true + case xdsclient.HashPolicyTypeChannelID: + // Hash the ClientConn pointer which logically uniquely + // identifies the client. + policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc)) + generatedHash = true + generatedPolicyHash = true + } + + // Deterministically combine the hash policies. Rotating prevents + // duplicate hash policies from cancelling each other out and preserves + // the 64 bits of entropy. + if generatedPolicyHash { + hash = bits.RotateLeft64(hash, 1) + hash = hash ^ policyHash + } + + // If terminal policy and a hash has already been generated, ignore the + // rest of the policies and use that hash already generated. + if policy.Terminal && generatedHash { + break + } + } + + if generatedHash { + return hash + } + // If no generated hash return a random long. In the grand scheme of things + // this logically will map to choosing a random backend to route request to. + return grpcrand.Uint64() +} + func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { if len(cs.httpFilterConfig) == 0 { return nil, nil @@ -254,8 +341,11 @@ var newWRR = wrr.NewRandom // r.activeClusters for previously-unseen clusters. 
func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) { cs := &configSelector{ - r: r, - virtualHost: virtualHost{httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride}, + r: r, + virtualHost: virtualHost{ + httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride, + retryConfig: su.virtualHost.RetryConfig, + }, routes: make([]route, len(su.virtualHost.Routes)), clusters: make(map[string]*clusterInfo), httpFilterConfig: su.ldsConfig.httpFilterConfig, @@ -282,7 +372,7 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro cs.routes[i].clusters = clusters var err error - cs.routes[i].m, err = routeToMatcher(rt) + cs.routes[i].m, err = xdsclient.RouteToMatcher(rt) if err != nil { return nil, err } @@ -293,6 +383,8 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro } cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].retryConfig = rt.RetryConfig + cs.routes[i].hashPolicies = rt.HashPolicies } // Account for this config selector's clusters. Do this after no further diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index 7667592ccd..da0bf95f3b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -20,12 +20,12 @@ package resolver import ( "fmt" - "strings" "sync" "time" "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient" ) // serviceUpdate contains information received from the LDS/RDS responses which @@ -53,7 +53,7 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { +func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, c: c, @@ -69,7 +69,7 @@ func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdat // callback at the right time. type serviceUpdateWatcher struct { logger *grpclog.PrefixLogger - c xdsClientInterface + c xdsclient.XDSClient serviceName string ldsCancel func() serviceCb func(serviceUpdate, error) @@ -82,7 +82,7 @@ type serviceUpdateWatcher struct { } func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) { - w.logger.Infof("received LDS update: %+v, err: %v", update, err) + w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { @@ -151,7 +151,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er } func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) { - matchVh := findBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) + matchVh := xdsclient.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) if matchVh == nil { // No matching virtual host found. 
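With retryConfig now plumbed through both the virtual host and the individual routes (route-level config winning, per the SelectConfig change earlier), the retryConfigToPolicy translation is mechanical. A self-contained sketch of the arithmetic with assumed values; the retryPolicy type here merely mirrors the relevant serviceconfig.RetryPolicy fields:

package main

import (
	"fmt"
	"time"
)

// retryPolicy mirrors the serviceconfig.RetryPolicy fields populated above.
type retryPolicy struct {
	MaxAttempts       int
	InitialBackoff    time.Duration
	MaxBackoff        time.Duration
	BackoffMultiplier float64
}

func main() {
	// Assumed xDS input: num_retries=2, base_interval=25ms, max_interval=250ms.
	numRetries := uint32(2)
	rp := retryPolicy{
		MaxAttempts:       int(numRetries) + 1, // xDS counts retries; gRPC counts total attempts
		InitialBackoff:    25 * time.Millisecond,
		MaxBackoff:        250 * time.Millisecond,
		BackoffMultiplier: 2, // fixed at 2 by the conversion above
	}
	fmt.Printf("%+v\n", rp) // {MaxAttempts:3 ...}
}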
w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) @@ -163,7 +163,7 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteC } func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { - w.logger.Infof("received RDS update: %+v, err: %v", update, err) + w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { @@ -191,97 +191,3 @@ func (w *serviceUpdateWatcher) close() { w.rdsCancel = nil } } - -type domainMatchType int - -const ( - domainMatchTypeInvalid domainMatchType = iota - domainMatchTypeUniversal - domainMatchTypePrefix - domainMatchTypeSuffix - domainMatchTypeExact -) - -// Exact > Suffix > Prefix > Universal > Invalid. -func (t domainMatchType) betterThan(b domainMatchType) bool { - return t > b -} - -func matchTypeForDomain(d string) domainMatchType { - if d == "" { - return domainMatchTypeInvalid - } - if d == "*" { - return domainMatchTypeUniversal - } - if strings.HasPrefix(d, "*") { - return domainMatchTypeSuffix - } - if strings.HasSuffix(d, "*") { - return domainMatchTypePrefix - } - if strings.Contains(d, "*") { - return domainMatchTypeInvalid - } - return domainMatchTypeExact -} - -func match(domain, host string) (domainMatchType, bool) { - switch typ := matchTypeForDomain(domain); typ { - case domainMatchTypeInvalid: - return typ, false - case domainMatchTypeUniversal: - return typ, true - case domainMatchTypePrefix: - // abc.* - return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) - case domainMatchTypeSuffix: - // *.123 - return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) - case domainMatchTypeExact: - return typ, domain == host - default: - return domainMatchTypeInvalid, false - } -} - -// findBestMatchingVirtualHost returns the virtual host whose domains field best -// matches host -// -// The domains field support 4 different matching pattern types: -// - Exact match -// - Suffix match (e.g. “*ABC”) -// - Prefix match (e.g. “ABC*) -// - Universal match (e.g. “*”) -// -// The best match is defined as: -// - A match is better if it’s matching pattern type is better -// - Exact match > suffix match > prefix match > universal match -// - If two matches are of the same pattern type, the longer match is better -// - This is to compare the length of the matching pattern, e.g. “*ABCDE” > -// “*ABC” -func findBestMatchingVirtualHost(host string, vHosts []*xdsclient.VirtualHost) *xdsclient.VirtualHost { - var ( - matchVh *xdsclient.VirtualHost - matchType = domainMatchTypeInvalid - matchLen int - ) - for _, vh := range vHosts { - for _, domain := range vh.Domains { - typ, matched := match(domain, host) - if typ == domainMatchTypeInvalid { - // The rds response is invalid. - return nil - } - if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { - // The previous match has better type, or the previous match has - // better length, or this domain isn't a match. 
- continue - } - matchVh = vh - matchType = typ - matchLen = len(domain) - } - } - return matchVh -} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index d8c09db69b..19ee01773e 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -26,23 +26,35 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/bootstrap" - + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/xdsclient" ) const xdsScheme = "xds" +// NewBuilder creates a new xds resolver builder using a specific xds bootstrap +// config, so tests can use multiple xds clients in different ClientConns at +// the same time. +func NewBuilder(config []byte) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func() (xdsclient.XDSClient, error) { + return xdsclient.NewClientWithBootstrapContents(config) + }, + }, nil +} + // For overriding in unittests. -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +var newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() } func init() { resolver.Register(&xdsResolverBuilder{}) } -type xdsResolverBuilder struct{} +type xdsResolverBuilder struct { + newXDSClient func() (xdsclient.XDSClient, error) +} // Build helps implement the resolver.Builder interface. // @@ -59,6 +71,11 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op r.logger = prefixLogger((r)) r.logger.Infof("Creating resolver for target: %+v", t) + newXDSClient := newXDSClient + if b.newXDSClient != nil { + newXDSClient = b.newXDSClient + } + client, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) @@ -100,15 +117,6 @@ func (*xdsResolverBuilder) Scheme() string { return xdsScheme } -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the resolver. This will be faked out in unittests. -type xdsClientInterface interface { - WatchListener(serviceName string, cb func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(routeName string, cb func(xdsclient.RouteConfigUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // suWithError wraps the ServiceUpdate and error received through a watch API // callback, so that it can pushed onto the update channel as a single entity. type suWithError struct { @@ -130,7 +138,7 @@ type xdsResolver struct { logger *grpclog.PrefixLogger // The underlying xdsClient which performs all xDS requests and responses. - client xdsClientInterface + client xdsclient.XDSClient // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. 
updateCh chan suWithError @@ -171,13 +179,13 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, sc) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ - ServiceConfig: r.cc.ParseServiceConfig(sc), + ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) - r.cc.UpdateState(state) + r.cc.UpdateState(xdsclient.SetClient(state, r.client)) return true } diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go index a02d75b214..dd0374dc88 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient" ) // connWrapper is a thin wrapper around a net.Conn returned by Accept(). It @@ -58,6 +58,15 @@ type connWrapper struct { // completing the HTTP2 handshake. deadlineMu sync.Mutex deadline time.Time + + // The virtual hosts with matchable routes and instantiated HTTP Filters per + // route. + virtualHosts []xdsclient.VirtualHostWithInterceptors +} + +// VirtualHosts returns the virtual hosts to be used for server side routing. +func (c *connWrapper) VirtualHosts() []xdsclient.VirtualHostWithInterceptors { + return c.virtualHosts } // SetDeadline makes a copy of the passed in deadline and forwards the call to @@ -102,7 +111,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs // Identity provider name is mandatory on the server-side, and this is - // enforced when the resource is received at the xdsClient layer. + // enforced when the resource is received at the XDSClient layer. secCfg := c.filterChain.SecurityCfg ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) if err != nil { @@ -124,6 +133,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return xdsHI, nil } +// Close closes the providers and the underlying connection. 
func (c *connWrapper) Close() error { if c.identityProvider != nil { c.identityProvider.Close() diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index 17f31f28f5..99c9a75323 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -21,18 +21,23 @@ package server import ( + "errors" "fmt" "net" "sync" + "sync/atomic" "time" + "unsafe" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" internalbackoff "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) var ( @@ -48,50 +53,28 @@ var ( backoffFunc = bs.Backoff ) -// ServingMode indicates the current mode of operation of the server. -// -// This API exactly mirrors the one in the public xds package. We have to -// redefine it here to avoid a cyclic dependency. -type ServingMode int - -const ( - // ServingModeStarting indicates that the serving is starting up. - ServingModeStarting ServingMode = iota - // ServingModeServing indicates the the server contains all required xDS - // configuration is serving RPCs. - ServingModeServing - // ServingModeNotServing indicates that the server is not accepting new - // connections. Existing connections will be closed gracefully, allowing - // in-progress RPCs to complete. A server enters this mode when it does not - // contain the required xDS configuration to serve RPCs. - ServingModeNotServing -) - -func (s ServingMode) String() string { - switch s { - case ServingModeNotServing: - return "not-serving" - case ServingModeServing: - return "serving" - default: - return "starting" - } -} - // ServingModeCallback is the callback that users can register to get notified // about the server's serving mode changes. The callback is invoked with the // address of the listener and its new mode. The err parameter is set to a // non-nil error if the server has transitioned into not-serving mode. -type ServingModeCallback func(addr net.Addr, mode ServingMode, err error) +type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err error) + +// DrainCallback is the callback that an xDS-enabled server registers to get +// notified about updates to the Listener configuration. The server is expected +// to gracefully shutdown existing connections, thereby forcing clients to +// reconnect and have the new configuration applied to the newly created +// connections. +type DrainCallback func(addr net.Addr) func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", p)) } -// XDSClientInterface wraps the methods on the xdsClient which are required by +// XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. 
-type XDSClientInterface interface { +type XDSClient interface { WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -104,10 +87,13 @@ type ListenerWrapperParams struct { // XDSCredsInUse specifies whether or not the user expressed interest to // receive security configuration from the control plane. XDSCredsInUse bool - // XDSClient provides the functionality from the xdsClient required here. - XDSClient XDSClientInterface + // XDSClient provides the functionality from the XDSClient required here. + XDSClient XDSClient // ModeCallback is the callback to invoke when the serving mode changes. ModeCallback ServingModeCallback + // DrainCallback is the callback to invoke when the Listener gets a LDS + // update. + DrainCallback DrainCallback } // NewListenerWrapper creates a new listenerWrapper with params. It returns a @@ -122,10 +108,13 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru xdsCredsInUse: params.XDSCredsInUse, xdsC: params.XDSClient, modeCallback: params.ModeCallback, + drainCallback: params.DrainCallback, isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), - closed: grpcsync.NewEvent(), - goodUpdate: grpcsync.NewEvent(), + closed: grpcsync.NewEvent(), + goodUpdate: grpcsync.NewEvent(), + ldsUpdateCh: make(chan ldsUpdateWithError, 1), + rdsUpdateCh: make(chan rdsHandlerUpdate, 1), } lw.logger = prefixLogger(lw) @@ -134,15 +123,23 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru lisAddr := lw.Listener.Addr().String() lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + lw.rdsHandler = newRDSHandler(lw.xdsC, lw.rdsUpdateCh) + cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) lw.logger.Infof("Watch started on resource name %v", lw.name) lw.cancelWatch = func() { cancelWatch() lw.logger.Infof("Watch cancelled on resource name %v", lw.name) } + go lw.run() return lw, lw.goodUpdate.Done() } +type ldsUpdateWithError struct { + update xdsclient.ListenerUpdate + err error +} + // listenerWrapper wraps the net.Listener associated with the listening address // passed to Serve(). It also contains all other state associated with this // particular invocation of Serve(). @@ -152,9 +149,10 @@ type listenerWrapper struct { name string xdsCredsInUse bool - xdsC XDSClientInterface + xdsC XDSClient cancelWatch func() modeCallback ServingModeCallback + drainCallback DrainCallback // Set to true if the listener is bound to the IP_ANY address (which is // "0.0.0.0" for IPv4 and "::" for IPv6). @@ -168,7 +166,7 @@ type listenerWrapper struct { // instead of a vanilla channel simplifies the update handler as it need not // keep track of whether the received update is the first one or not. goodUpdate *grpcsync.Event - // A small race exists in the xdsClient code between the receipt of an xDS + // A small race exists in the XDSClient code between the receipt of an xDS // response and the user cancelling the associated watch. In this window, // the registered callback may be invoked after the watch is canceled, and // the user is expected to work around this. This event signifies that the @@ -182,9 +180,20 @@ type listenerWrapper struct { // get a Listener resource update). mu sync.RWMutex // Current serving mode. - mode ServingMode + mode connectivity.ServingMode // Filter chains received as part of the last good update. 
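For orientation, a hedged sketch of how a server might wire up the wrapper using only the ListenerWrapperParams fields shown above (Listener, XDSClient, ModeCallback, DrainCallback); other required fields, such as the listener resource name, are elided:

import (
	"log"
	"net"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/xds/internal/server"
)

// wrapListener is hypothetical glue, not part of the package.
func wrapListener(tcpLis net.Listener, client server.XDSClient) (net.Listener, <-chan struct{}) {
	return server.NewListenerWrapper(server.ListenerWrapperParams{
		Listener:  tcpLis,
		XDSClient: client,
		ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) {
			log.Printf("listener %v is now %v (err: %v)", addr, mode, err)
		},
		DrainCallback: func(addr net.Addr) {
			// Gracefully drain existing connections so clients reconnect
			// and pick up the new Listener configuration.
		},
	})
}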
filterChains *xdsclient.FilterChainManager + + // rdsHandler is used for any dynamic RDS resources specified in an LDS + // update. + rdsHandler *rdsHandler + // rdsUpdates are the RDS resources received from the management + // server, keyed on the RouteName of the RDS resource. + rdsUpdates unsafe.Pointer // map[string]xdsclient.RouteConfigUpdate + // ldsUpdateCh is a channel for XDSClient LDS updates. + ldsUpdateCh chan ldsUpdateWithError + // rdsUpdateCh is a channel for XDSClient RDS updates. + rdsUpdateCh chan rdsHandlerUpdate } // Accept blocks on an Accept() on the underlying listener, and wraps the @@ -230,7 +239,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { } l.mu.RLock() - if l.mode == ServingModeNotServing { + if l.mode == connectivity.ServingModeNotServing { // Close connections as soon as we accept them when we are in // "not-serving" mode. Since we accept a net.Listener from the user // in Serve(), we cannot close the listener when we move to @@ -264,7 +273,39 @@ func (l *listenerWrapper) Accept() (net.Conn, error) { conn.Close() continue } - return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil + if !env.RBACSupport { + return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil + } + var rc xdsclient.RouteConfigUpdate + if fc.InlineRouteConfig != nil { + rc = *fc.InlineRouteConfig + } else { + rcPtr := atomic.LoadPointer(&l.rdsUpdates) + rcuPtr := (*map[string]xdsclient.RouteConfigUpdate)(rcPtr) + // This shouldn't happen, but this error protects against a panic. + if rcuPtr == nil { + return nil, errors.New("route configuration pointer is nil") + } + rcu := *rcuPtr + rc = rcu[fc.RouteConfigName] + } + // The filter chain will construct a usable route table on each + // connection accept. This is done because preinstantiating every route + // table before it is needed for a connection would potentially lead to + // a lot of cpu time and memory allocated for route tables that will + // never be used. There was also a thought to cache this configuration, + // and reuse it for the next accepted connection. However, this would + // lead to a lot of code complexity (RDS Updates for a given route name + // can come in at any time), and connections aren't accepted too often, + // so this reinstantiation of the Route Configuration is an acceptable + // tradeoff for simplicity. + vhswi, err := fc.ConstructUsableRouteConfiguration(rc) + if err != nil { + l.logger.Warningf("route configuration construction: %v", err) + conn.Close() + continue + } + return &connWrapper{Conn: conn, filterChain: fc, parent: l, virtualHosts: vhswi}, nil } } @@ -277,49 +318,118 @@ func (l *listenerWrapper) Close() error { if l.cancelWatch != nil { l.cancelWatch() } + l.rdsHandler.close() return nil } +// run is a long-running goroutine which handles all xds updates. LDS and RDS +// push updates onto a channel which is read and acted upon by this goroutine. +func (l *listenerWrapper) run() { + for { + select { + case <-l.closed.Done(): + return + case u := <-l.ldsUpdateCh: + l.handleLDSUpdate(u) + case u := <-l.rdsUpdateCh: + l.handleRDSUpdate(u) + } + } +} + +// handleListenerUpdate is the callback which handles LDS updates. It writes the +// received update to the update channel, which is picked up by the run +// goroutine.
func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) { if l.closed.HasFired() { l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err) return } + // Remove any existing entry in ldsUpdateCh and replace with the new one, as + // the only update the listener cares about is the most recent one. + select { + case <-l.ldsUpdateCh: + default: + } + l.ldsUpdateCh <- ldsUpdateWithError{update: update, err: err} +} - if err != nil { - l.logger.Warningf("Received error for resource %q: %+v", l.name, err) - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - l.switchMode(nil, ServingModeNotServing, err) +// handleRDSUpdate handles a full rds update from the rds handler. On a +// successful update, the server will switch to ServingModeServing as the full +// configuration (both LDS and RDS) has been received. +func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { + if l.closed.HasFired() { + l.logger.Warningf("RDS received update: %v with error: %v, after listener was closed", update.updates, update.err) + return + } + if update.err != nil { + l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err) + if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) } // For errors which are anything other than "resource-not-found", we // continue to use the old configuration. return } - l.logger.Infof("Received update for resource %q: %+v", l.name, update) + atomic.StorePointer(&l.rdsUpdates, unsafe.Pointer(&update.updates)) + + l.switchMode(l.filterChains, connectivity.ServingModeServing, nil) + l.goodUpdate.Fire() +} + +func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { + if update.err != nil { + l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err) + if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. + return + } + l.logger.Infof("Received update for resource %q: %+v", l.name, update.update) // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. This - // check is done here instead of at the xdsClient layer because of the + // check is done here instead of at the XDSClient layer because of the // following couple of reasons: - // - xdsClient cannot know the listening address of every listener in the + // - XDSClient cannot know the listening address of every listener in the // system, and hence cannot perform this check. // - this is a very context-dependent check and only the server has the // appropriate context to perform this check. // - // What this means is that the xdsClient has ACKed a resource which can push + // What this means is that the XDSClient has ACKed a resource which can push // the server into a "not serving" mode. This is not ideal, but this is // what we have decided to do. See gRPC A36 for more details.
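The select-with-default drain used in handleListenerUpdate above (and factored out as drainAndPush in rds_handler.go below) is the usual keep-only-the-latest idiom for a buffered channel of size one. A self-contained sketch; note it assumes a single producer (or external locking), since the drain and the send are not atomic together:

package main

import "fmt"

// pushLatest drops any stale value sitting in the size-1 buffer before
// sending, so a slow reader always observes the most recent update.
func pushLatest(ch chan string, v string) {
	select {
	case <-ch:
	default:
	}
	ch <- v
}

func main() {
	ch := make(chan string, 1)
	pushLatest(ch, "update-1")
	pushLatest(ch, "update-2") // displaces update-1
	fmt.Println(<-ch)          // prints "update-2"
}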
- ilc := update.InboundListenerCfg + ilc := update.update.InboundListenerCfg if ilc.Address != l.addr || ilc.Port != l.port { - l.switchMode(nil, ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) + l.switchMode(nil, connectivity.ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) return } - l.switchMode(ilc.FilterChains, ServingModeServing, nil) - l.goodUpdate.Fire() + // "Updates to a Listener cause all older connections on that Listener to be + // gracefully shut down with a grace period of 10 minutes for long-lived + // RPC's, such that clients will reconnect and have the updated + // configuration apply." - A36. Note that this is not the same as moving the + // Server's state to ServingModeNotServing. That prevents new connections + // from being accepted, whereas here we simply want the clients to reconnect + // to get the updated configuration. + if env.RBACSupport { + if l.drainCallback != nil { + l.drainCallback(l.Listener.Addr()) + } + } + l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) + // If there are no dynamic RDS Configurations still needed to be received + // from the management server, this listener has all the configuration + // needed, and is ready to serve. + if len(ilc.FilterChains.RouteConfigNames) == 0 { + l.switchMode(ilc.FilterChains, connectivity.ServingModeServing, nil) + l.goodUpdate.Fire() + } } -func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode ServingMode, err error) { +func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() defer l.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go new file mode 100644 index 0000000000..cc676c4ca0 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient" +) + +// rdsHandlerUpdate wraps the full set of RouteConfigUpdates that are +// dynamically queried for a given server side listener. +type rdsHandlerUpdate struct { + updates map[string]xdsclient.RouteConfigUpdate + err error +} + +// rdsHandler handles any RDS queries that need to be started for a given server +// side listener's Filter Chains (i.e. not inline). +type rdsHandler struct { + xdsC XDSClient + + mu sync.Mutex + updates map[string]xdsclient.RouteConfigUpdate + cancels map[string]func() + + // For an rdsHandler update, the only update the wrapped listener cares + // about is the most recent one, so this channel will be opportunistically + // drained before sending any new updates.
+ updateChannel chan rdsHandlerUpdate +} + +// newRDSHandler creates a new rdsHandler to watch for RDS resources. +// listenerWrapper updates the list of route names to watch by calling +// updateRouteNamesToWatch() upon receipt of new Listener configuration. +func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler { + return &rdsHandler{ + xdsC: xdsC, + updateChannel: ch, + updates: make(map[string]xdsclient.RouteConfigUpdate), + cancels: make(map[string]func()), + } +} + +// updateRouteNamesToWatch handles a list of route names to watch for a given +// server side listener (if a filter chain specifies dynamic RDS configuration). +// This function handles all the logic with respect to any routes that may have +// been added or deleted as compared to what was previously present. +func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) { + rh.mu.Lock() + defer rh.mu.Unlock() + // Add and start watches for any new routes in routeNamesToWatch. + for routeName := range routeNamesToWatch { + if _, ok := rh.cancels[routeName]; !ok { + func(routeName string) { + rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsclient.RouteConfigUpdate, err error) { + rh.handleRouteUpdate(routeName, update, err) + }) + }(routeName) + } + } + + // Delete and cancel watches for any previously persisted routes that are + // no longer present in routeNamesToWatch. + for routeName := range rh.cancels { + if _, ok := routeNamesToWatch[routeName]; !ok { + rh.cancels[routeName]() + delete(rh.cancels, routeName) + delete(rh.updates, routeName) + } + } + + // If the full list (determined by length) of updates has now been + // received, the listener is ready to be updated. + if len(rh.updates) == len(rh.cancels) && len(routeNamesToWatch) != 0 { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +// handleRouteUpdate persists the route config for a given route name, and also +// sends an update to the Listener Wrapper when an error is received or when the +// rds handler has a full collection of updates. +func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsclient.RouteConfigUpdate, err error) { + if err != nil { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err}) + return + } + rh.mu.Lock() + defer rh.mu.Unlock() + rh.updates[routeName] = update + + // If the full list (determined by length) of updates has now been + // received, the listener is ready to be updated. + if len(rh.updates) == len(rh.cancels) { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +func drainAndPush(ch chan rdsHandlerUpdate, update rdsHandlerUpdate) { + select { + case <-ch: + default: + } + ch <- update +} + +// close() is meant to be called when the wrapped listener is closed, and it +// cleans up resources by canceling all the active RDS watches. +func (rh *rdsHandler) close() { + rh.mu.Lock() + defer rh.mu.Unlock() + for _, cancel := range rh.cancels { + cancel() + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go new file mode 100644 index 0000000000..d2357df072 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go @@ -0,0 +1,59 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
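The add/remove logic in updateRouteNamesToWatch is a plain set diff between the wanted route names and the currently running watches. A standalone sketch, with watch standing in for xdsC.WatchRouteConfig:

package main

import "fmt"

// syncWatches mirrors updateRouteNamesToWatch's diffing: start watches for
// newly wanted names, cancel watches for names no longer wanted.
func syncWatches(want map[string]bool, cancels map[string]func(), watch func(string) func()) {
	for name := range want {
		if _, ok := cancels[name]; !ok {
			cancels[name] = watch(name)
		}
	}
	for name := range cancels {
		if !want[name] {
			cancels[name]()
			delete(cancels, name)
		}
	}
}

func main() {
	cancels := map[string]func(){}
	watch := func(name string) func() { return func() { fmt.Println("cancelled", name) } }
	syncWatches(map[string]bool{"route-A": true}, cancels, watch)
	syncWatches(map[string]bool{"route-B": true}, cancels, watch) // cancels route-A
}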
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" +) + +type clientKeyType string + +const clientKey = clientKeyType("grpc.xds.internal.client.Client") + +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. +type XDSClient interface { + WatchListener(string, func(ListenerUpdate, error)) func() + WatchRouteConfig(string, func(RouteConfigUpdate, error)) func() + WatchCluster(string, func(ClusterUpdate, error)) func() + WatchEndpoints(clusterName string, edsCb func(EndpointsUpdate, error)) (cancel func()) + ReportLoad(server string) (*load.Store, func()) + + DumpLDS() (string, map[string]UpdateWithMD) + DumpRDS() (string, map[string]UpdateWithMD) + DumpCDS() (string, map[string]UpdateWithMD) + DumpEDS() (string, map[string]UpdateWithMD) + + BootstrapConfig() *bootstrap.Config + Close() +} + +// FromResolverState returns the Client from state, or nil if not present. +func FromResolverState(state resolver.State) XDSClient { + cs, _ := state.Attributes.Value(clientKey).(XDSClient) + return cs +} + +// SetClient sets c in state and returns the new state. +func SetClient(state resolver.State, c XDSClient) resolver.State { + state.Attributes = state.Attributes.WithValues(clientKey, c) + return state +} diff --git a/vendor/google.golang.org/grpc/xds/internal/client/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go similarity index 93% rename from vendor/google.golang.org/grpc/xds/internal/client/bootstrap/bootstrap.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go index f32c698b4f..fa229d9959 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/bootstrap/bootstrap.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal/version" ) @@ -126,16 +127,18 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // // The format of the bootstrap file will be as follows: // { -// "xds_server": { -// "server_uri": , -// "channel_creds": [ -// { -// "type": , -// "config": -// } -// ], -// "server_features": [ ... ], -// }, +// "xds_servers": [ +// { +// "server_uri": , +// "channel_creds": [ +// { +// "type": , +// "config": +// } +// ], +// "server_features": [ ... ], +// } +// ], // "node": , // "certificate_providers" : { // "default": { @@ -159,13 +162,19 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) { // fields left unspecified, in which case the caller should use some sane // defaults. 
func NewConfig() (*Config, error) { - config := &Config{} - data, err := bootstrapConfigFromEnvVariable() if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) } logger.Debugf("Bootstrap content: %s", data) + return NewConfigFromContents(data) +} + +// NewConfigFromContents returns a new Config using the specified bootstrap +// file contents instead of reading the environment variable. This is only +// suitable for testing purposes. +func NewConfigFromContents(data []byte) (*Config, error) { + config := &Config{} var jsonData map[string]json.RawMessage if err := json.Unmarshal(data, &jsonData); err != nil { @@ -270,7 +279,7 @@ func NewConfig() (*Config, error) { if err := config.updateNodeProto(); err != nil { return nil, err } - logger.Infof("Bootstrap config for creating xds-client: %+v", config) + logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) return config, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/bootstrap/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/logging.go similarity index 100% rename from vendor/google.golang.org/grpc/xds/internal/client/bootstrap/logging.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/logging.go diff --git a/vendor/google.golang.org/grpc/xds/internal/client/callback.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/callback.go similarity index 66% rename from vendor/google.golang.org/grpc/xds/internal/client/callback.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/callback.go index da8e2f62d6..0374389fbc 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/callback.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/callback.go @@ -16,7 +16,9 @@ * */ -package client +package xdsclient + +import "google.golang.org/grpc/internal/pretty" type watcherInfoWithUpdate struct { wi *watchInfo @@ -74,39 +76,45 @@ func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.ldsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.ldsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.ldsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.ldsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.ldsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.ldsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(uErr.Err) + } + continue } - } - return - } - - // If no error received, the status is ACK. - c.ldsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.ldsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. 
- c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, update) - c.ldsCache[name] = update - c.ldsMD[name] = metadata + c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.ldsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.ldsMD[name] = mdCopy } } // Resources not in the new update were removed by the server, so delete @@ -133,39 +141,46 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata Up // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + // If no error received, the status is ACK. + c.rdsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.rdsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.rdsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.rdsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.rdsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.rdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + wi.newError(uErr.Err) + } + continue } - } - return - } - - // If no error received, the status is ACK. - c.rdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.rdsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, update) - c.rdsCache[name] = update - c.rdsMD[name] = metadata + c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.rdsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.rdsMD[name] = mdCopy } } } @@ -175,39 +190,47 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metad // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.cdsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. 
c.cdsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.cdsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.cdsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.cdsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.cdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + // Send the watcher the individual error, instead of the + // overall combined error from the metadata.ErrState. + wi.newError(uErr.Err) + } + continue } - } - return - } - - // If no error received, the status is ACK. - c.cdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.cdsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. - c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, update) - c.cdsCache[name] = update - c.cdsMD[name] = metadata + c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.cdsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.cdsMD[name] = mdCopy } } // Resources not in the new update were removed by the server, so delete @@ -234,39 +257,60 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata Upda // // A response can contain multiple resources. They will be parsed and put in a // map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata UpdateMetadata) { +func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) { c.mu.Lock() defer c.mu.Unlock() + c.edsVersion = metadata.Version if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. c.edsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.edsWatchers[name]; ok { + } + for name, uErr := range updates { + if s, ok := c.edsWatchers[name]; ok { + if uErr.Err != nil { // On error, keep previous version for each resource. But update // status and error. mdCopy := c.edsMD[name] mdCopy.ErrState = metadata.ErrState mdCopy.Status = metadata.Status c.edsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. + for wi := range s { + // Send the watcher the individual error, instead of the + // overall combined error from the metadata.ErrState. + wi.newError(uErr.Err) + } + continue } - } - return - } - - // If no error received, the status is ACK. - c.edsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.edsWatchers[name]; ok { - // Only send the update if this is not an error. + // If the resource is valid, send the update. for wi := range s { - wi.newUpdate(update) + wi.newUpdate(uErr.Update) } // Sync cache. 
- c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, update) - c.edsCache[name] = update - c.edsMD[name] = metadata + c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr)) + c.edsCache[name] = uErr.Update + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + mdCopy := metadata + mdCopy.Status = ServiceStatusACKed + mdCopy.ErrState = nil + if metadata.ErrState != nil { + mdCopy.Version = metadata.ErrState.Version + } + c.edsMD[name] = mdCopy + } + } +} + +// NewConnectionError is called by the underlying xdsAPIClient when it receives +// a connection error. The error will be forwarded to all the resource watchers. +func (c *clientImpl) NewConnectionError(err error) { + c.mu.Lock() + defer c.mu.Unlock() + + for _, s := range c.edsWatchers { + for wi := range s { + wi.newError(NewErrorf(ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) } } } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go similarity index 82% rename from vendor/google.golang.org/grpc/xds/internal/client/client.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 603632801b..e549d55895 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -16,9 +16,9 @@ * */ -// Package client implements a full fledged gRPC client for the xDS API used by -// the xds resolver and balancer implementations. -package client +// Package xdsclient implements a full fledged gRPC client for the xDS API used +// by the xds resolver and balancer implementations. +package xdsclient import ( "context" @@ -33,9 +33,10 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" @@ -44,8 +45,8 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) var ( @@ -133,14 +134,17 @@ type loadReportingOptions struct { // resource updates from an APIClient for a specific version. type UpdateHandler interface { // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]ListenerUpdate, UpdateMetadata) + NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata) // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]RouteConfigUpdate, UpdateMetadata) + NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata) // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]ClusterUpdate, UpdateMetadata) + NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata) // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely // referred to as Endpoints) resources. 
- NewEndpoints(map[string]EndpointsUpdate, UpdateMetadata) + NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata) + // NewConnectionError handles connection errors from the xDS stream. The + // error will be reported to all the resource watchers. + NewConnectionError(err error) } // ServiceStatus is the status of the update. @@ -248,7 +252,6 @@ type InboundListenerConfig struct { // of interest to the registered RDS watcher. type RouteConfigUpdate struct { VirtualHosts []*VirtualHost - // Raw is the resource from the xds response. Raw *anypb.Any } @@ -267,8 +270,67 @@ type VirtualHost struct { // may be unused if the matching Route contains an override for that // filter. HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. In grpc-go, this will be done using the ClientConn pointer. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. +type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string } +// RouteAction is the action of the route from a received RDS response. +type RouteAction int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteAction = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + // Route is both a specification of how to match a request as well as an // indication of the action to take upon match. type Route struct { @@ -281,6 +343,8 @@ type Route struct { Headers []*HeaderMatcher Fraction *uint32 + HashPolicies []*HashPolicy + // If the matchers above indicate a match, the below configuration is used. 
WeightedClusters map[string]WeightedCluster // If MaxStreamDuration is nil, it indicates neither of the route action's @@ -294,6 +358,9 @@ type Route struct { // unused if the matching WeightedCluster contains an override for that // filter. HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + RouteAction RouteAction } // WeightedCluster contains settings for an xds RouteAction.WeightedCluster. @@ -351,13 +418,45 @@ type SecurityConfig struct { // - If the peer certificate contains a wildcard DNS SAN, and an `exact` // matcher is configured, a wildcard DNS match is performed instead of a // regular string comparison. - SubjectAltNameMatchers []xds.StringMatcher + SubjectAltNameMatchers []matcher.StringMatcher // RequireClientCert indicates if the server handshake process expects the // client to present a certificate. Set to true when performing mTLS. Used // only on the server-side. RequireClientCert bool } +// Equal returns true if sc is equal to other. +func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} + // ClusterType is the type of cluster from a received CDS response. type ClusterType int @@ -374,26 +473,47 @@ const ( ClusterTypeAggregate ) +// ClusterLBPolicyRingHash represents the ring_hash lb policy, and also contains +// its config. +type ClusterLBPolicyRingHash struct { + MinimumRingSize uint64 + MaximumRingSize uint64 +} + // ClusterUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type ClusterUpdate struct { ClusterType ClusterType - // ServiceName is the service name corresponding to the clusterName which - // is being watched for through CDS. - ServiceName string + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string // EnableLRS indicates whether or not load should be reported through LRS. EnableLRS bool // SecurityCfg contains security configuration sent by the control plane. SecurityCfg *SecurityConfig // MaxRequests for circuit breaking, if any (otherwise nil). MaxRequests *uint32 - - // Raw is the resource from the xds response. - Raw *anypb.Any - + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form. + DNSHostName string // PrioritizedClusterNames is used only for cluster type aggregate. It represents // a prioritized list of cluster names. PrioritizedClusterNames []string + + // LBPolicy is the lb policy for this cluster. + // + // This only supports round_robin and ring_hash.
+ // - if it's nil, the lb policy is round_robin + // - if it's not nil, the lb policy is ring_hash, and this field has the config. + // + // When we add more supported policies, this can be made an interface, and + // will be set to different types based on the policy type. + LBPolicy *ClusterLBPolicyRingHash + + // Raw is the resource from the xds response. + Raw *anypb.Any } // OverloadDropConfig contains the config to drop overloads. @@ -575,7 +695,7 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) ( // BootstrapConfig returns the configuration read from the bootstrap file. // Callers must treat the return value as read-only. -func (c *Client) BootstrapConfig() *bootstrap.Config { +func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config { return c.config } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/dump.go similarity index 99% rename from vendor/google.golang.org/grpc/xds/internal/client/dump.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/dump.go index 3fd18f6103..db9b474f37 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/dump.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/dump.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import anypb "github.com/golang/protobuf/ptypes/any" diff --git a/vendor/google.golang.org/grpc/xds/internal/client/errors.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/errors.go similarity index 98% rename from vendor/google.golang.org/grpc/xds/internal/client/errors.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/errors.go index 34ae2738db..4d6cdaaf9b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/errors.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/errors.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import "fmt" diff --git a/vendor/google.golang.org/grpc/xds/internal/client/filter_chain.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/filter_chain.go similarity index 70% rename from vendor/google.golang.org/grpc/xds/internal/client/filter_chain.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/filter_chain.go index 66d26d03b6..3b010ebdb9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/filter_chain.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/filter_chain.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "errors" @@ -24,9 +24,13 @@ import ( "net" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" "github.com/golang/protobuf/proto" - + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/version" ) @@ -50,14 +54,92 @@ const ( // FilterChain captures information from within a FilterChain message in a // Listener resource. -// -// Currently, this simply contains the security configuration found in the -// 'transport_socket' field of the filter chain. The actual set of filters -// associated with this filter chain are not captured here, since we do not -// support these filters on the server-side yet.
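A hedged sketch of how a consumer might act on the LBPolicy contract documented above (nil means round_robin; non-nil means ring_hash with the given ring bounds); lbPolicyName is illustrative only, not part of the package:

import (
	"fmt"

	"google.golang.org/grpc/xds/internal/xdsclient"
)

// lbPolicyName is a hypothetical helper showing the nil / non-nil contract.
func lbPolicyName(u xdsclient.ClusterUpdate) string {
	if u.LBPolicy == nil {
		return "round_robin"
	}
	return fmt.Sprintf("ring_hash{min:%d max:%d}", u.LBPolicy.MinimumRingSize, u.LBPolicy.MaximumRingSize)
}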
type FilterChain struct { // SecurityCfg contains transport socket security configuration. SecurityCfg *SecurityConfig + // HTTPFilters represent the HTTP Filters that comprise this FilterChain. + HTTPFilters []HTTPFilter + // RouteConfigName is the route configuration name for this FilterChain. + // + // Only one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned for this filter chain. + // + // Only one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate +} + +// VirtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. +type VirtualHostWithInterceptors struct { + // Domains are the domain names which map to this Virtual Host. On the + // server side, this will be dictated by the :authority header of the + // incoming RPC. + Domains []string + // Routes are the Routes for this Virtual Host. + Routes []RouteWithInterceptors +} + +// RouteWithInterceptors captures information in a Route, and contains +// a usable matcher and instantiated HTTP Filters. +type RouteWithInterceptors struct { + // M is the matcher used to match to this route. + M *CompositeMatcher + // RouteAction is the type of routing action to initiate once matched to. + RouteAction RouteAction + // Interceptors are interceptors instantiated for this route. These will be + // constructed from a combination of the top level configuration and any + // HTTP Filter overrides present in Virtual Host or Route. + Interceptors []resolver.ServerInterceptor +} + +// ConstructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. +func (f *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) ([]VirtualHostWithInterceptors, error) { + vhs := make([]VirtualHostWithInterceptors, 0, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := f.convertVirtualHost(vh) + if err != nil { + return nil, fmt.Errorf("virtual host construction: %v", err) + } + vhs = append(vhs, vhwi) + } + return vhs, nil +} + +func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { + rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + var err error + rs[i].RouteAction = r.RouteAction + rs[i].M, err = RouteToMatcher(r) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) + } + for _, filter := range f.HTTPFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. + override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation.
+ return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].Interceptors = append(rs[i].Interceptors, si) + } + } + } + return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil } // SourceType specifies the connection source IP match type. @@ -113,6 +195,11 @@ type FilterChainManager struct { dstPrefixes []*destPrefixEntry def *FilterChain // Default filter chain, if specified. + + // RouteConfigNames are the route configuration names which need to be + // dynamically queried for RDS Configuration for any FilterChains which + // specify to load RDS Configuration dynamically. + RouteConfigNames map[string]bool } // destPrefixEntry is the value type of the map indexed on destination prefixes. @@ -162,7 +249,10 @@ type sourcePrefixEntry struct { // create a FilterChainManager. func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { // Parse all the filter chains and build the internal data structures. - fci := &FilterChainManager{dstPrefixMap: make(map[string]*destPrefixEntry)} + fci := &FilterChainManager{ + dstPrefixMap: make(map[string]*destPrefixEntry), + RouteConfigNames: make(map[string]bool), + } if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { return nil, err } @@ -191,7 +281,7 @@ func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, err var def *FilterChain if dfc := lis.GetDefaultFilterChain(); dfc != nil { var err error - if def, err = filterChainFromProto(dfc); err != nil { + if def, err = fci.filterChainFromProto(dfc); err != nil { return nil, err } } @@ -227,8 +317,9 @@ func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) } func (fci *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { - var dstPrefixes []*net.IPNet - for _, pr := range fc.GetFilterChainMatch().GetPrefixRanges() { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) _, ipnet, err := net.ParseCIDR(cidr) if err != nil { @@ -327,7 +418,8 @@ func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefix // structures and delegates control to addFilterChainsForSourcePorts to continue // building the internal data structure. func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error { - var srcPrefixes []*net.IPNet + ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges() + srcPrefixes := make([]*net.IPNet, 0, len(ranges)) for _, pr := range fc.GetFilterChainMatch().GetSourcePrefixRanges() { cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) _, ipnet, err := net.ParseCIDR(cidr) @@ -367,12 +459,13 @@ func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map // It is here that we determine if there are multiple filter chains with // overlapping matching rules. 
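Editor's sketch, not part of the diff: RouteConfigNames above is what tells a server-side listener wrapper which RDS resources still need to be fetched. Assuming the client exposes a WatchRouteConfig method (grpc-go's xds client does, though it is not shown in this diff), and with xdsC, handleRouteConfigUpdate and cancels as illustrative names, the wiring might look roughly like:

// Parse the LDS resource, then start one RDS watch per dynamically
// referenced route configuration name.
fcMgr, err := NewFilterChainManager(lis)
if err != nil {
	return err
}
var cancels []func()
for name := range fcMgr.RouteConfigNames {
	cancels = append(cancels, xdsC.WatchRouteConfig(name, handleRouteConfigUpdate))
}
// Each cancel func would be invoked when the listener is torn down.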
func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error { - var srcPorts []int - for _, port := range fcProto.GetFilterChainMatch().GetSourcePorts() { + ports := fcProto.GetFilterChainMatch().GetSourcePorts() + srcPorts := make([]int, 0, len(ports)) + for _, port := range ports { srcPorts = append(srcPorts, int(port)) } - fc, err := filterChainFromProto(fcProto) + fc, err := fci.filterChainFromProto(fcProto) if err != nil { return err } @@ -395,16 +488,26 @@ } // filterChainFromProto extracts the relevant information from the FilterChain -// proto and stores it in our internal representation. Currently, we only -// process the security configuration stored in the transport_socket field. -func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { +// proto and stores it in our internal representation. It also persists any +// RouteNames which need to be queried dynamically via RDS. +func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { + filterChain, err := processNetworkFilters(fc.GetFilters()) + if err != nil { + return nil, err + } + // If specified for the filter chain, these route names will be dynamically + // queried via RDS by the wrapped listener that receives the LDS response. + if filterChain.RouteConfigName != "" { + fci.RouteConfigNames[filterChain.RouteConfigName] = true + } // If the transport_socket field is not specified, it means that the control // plane has not sent us any security config. This is fine and the server // will use the fallback credentials configured as part of the // xdsCredentials. ts := fc.GetTransportSocket() if ts == nil { - return &FilterChain{}, nil + return filterChain, nil } if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) @@ -417,21 +520,125 @@ func filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) { if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) } + if downstreamCtx.GetRequireSni().GetValue() { + return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx) + } + if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING { + return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx) + } + // The following fields from `DownstreamTlsContext` are ignored: + // - disable_stateless_session_resumption + // - session_ticket_keys + // - session_ticket_keys_sds_secret_config + // - session_timeout if downstreamCtx.GetCommonTlsContext() == nil { return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") } - sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext()) + sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true) if err != nil { return nil, err } - if sc.IdentityInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + if sc == nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration.
xDS creds will use fallback creds. + return filterChain, nil } sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() if sc.RequireClientCert && sc.RootInstanceName == "" { return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") } - return &FilterChain{SecurityCfg: sc}, nil + filterChain.SecurityCfg = sc + return filterChain, nil +} + +func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) { + filterChain := &FilterChain{} + seenNames := make(map[string]bool, len(filters)) + seenHCM := false + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter) + } + if seenNames[name] { + return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name) + } + seenNames[name] = true + + // Network filters have a oneof field named `config_type` where we + // only support `TypedConfig` variant. + switch typ := filter.GetConfigType().(type) { + case *v3listenerpb.Filter_TypedConfig: + // The typed_config field has an `anypb.Any` proto which could + // directly contain the serialized bytes of the actual filter + // configuration, or it could be encoded as a `TypedStruct`. + // TODO: Add support for `TypedStruct`. + tc := filter.GetTypedConfig() + + // The only network filter that we currently support is the v3 + // HttpConnectionManager. So, we can directly check the type_url + // and unmarshal the config. + // TODO: Implement a registry of supported network filters (like + // we have for HTTP filters), when we have to support network + // filters other than HttpConnectionManager. + if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { + return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter) + } + hcm := &v3httppb.HttpConnectionManager{} + if err := ptypes.UnmarshalAny(tc, hcm); err != nil { + return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err) + } + // "Any filters after HttpConnectionManager should be ignored during + // connection processing but still be considered for validity. + // HTTPConnectionManager must have valid http_filters." - A36 + hfs, err := processHTTPFilters(hcm.GetHttpFilters(), true) + if err != nil { + return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err) + } + if !seenHCM { + // TODO: Implement terminal filter logic, as per A36. + filterChain.HTTPFilters = hfs + seenHCM = true + if !env.RBACSupport { + continue + } + switch hcm.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if hcm.GetRds().GetConfigSource().GetAds() == nil { + return nil, fmt.Errorf("ConfigSource is not ADS: %+v", hcm) + } + name := hcm.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", hcm) + } + filterChain.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + // "RouteConfiguration validation logic inherits all + // previous validations made for client-side usage as RDS + // does not distinguish between client-side and + // server-side." - A36 + // We can specify v3 here, since this function is never + // reached for v2 resources.
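+ // (Editor's note: this is the inline-RDS branch. In the Rds case
+ // above, the route configuration is fetched later by a separate RDS
+ // watch keyed on RouteConfigName; here the full RouteConfiguration
+ // arrives embedded in the LDS resource and is parsed immediately.)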
+ routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig(), nil, false) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + filterChain.InlineRouteConfig = &routeU + case nil: + // No-op, as no route specifier is a valid configuration on + // the server side. + default: + return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier) + } + } + default: + return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName()) + } + } + if !seenHCM { + return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters) + } + return filterChain, nil } // FilterChainLookupParams wraps parameters to be passed to Lookup. diff --git a/vendor/google.golang.org/grpc/xds/internal/client/load/reporter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go similarity index 100% rename from vendor/google.golang.org/grpc/xds/internal/client/load/reporter.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/load/reporter.go diff --git a/vendor/google.golang.org/grpc/xds/internal/client/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go similarity index 100% rename from vendor/google.golang.org/grpc/xds/internal/client/load/store.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go diff --git a/vendor/google.golang.org/grpc/xds/internal/client/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go similarity index 98% rename from vendor/google.golang.org/grpc/xds/internal/client/loadreport.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go index be42a6e0c3..32a71dada7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/loadreport.go @@ -15,13 +15,13 @@ * limitations under the License. */ -package client +package xdsclient import ( "context" "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // ReportLoad starts a load reporting stream to the given server. If the server diff --git a/vendor/google.golang.org/grpc/xds/internal/client/logging.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go similarity index 98% rename from vendor/google.golang.org/grpc/xds/internal/client/logging.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go index bff3fb1d3d..e28ea0d041 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/logging.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/logging.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "fmt" diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher.go new file mode 100644 index 0000000000..e663e02769 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/metadata" +) + +// RouteToMatcher converts a route to a Matcher to match incoming RPCs against. +func RouteToMatcher(r *Route) (*CompositeMatcher, error) { + var pm pathMatcher + switch { + case r.Regex != nil: + pm = newPathRegexMatcher(r.Regex) + case r.Path != nil: + pm = newPathExactMatcher(*r.Path, r.CaseInsensitive) + case r.Prefix != nil: + pm = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) + default: + return nil, fmt.Errorf("illegal route: missing path_matcher") + } + + headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) + for _, h := range r.Headers { + var matcherT matcher.HeaderMatcher + switch { + case h.ExactMatch != nil && *h.ExactMatch != "": + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch) + case h.RegexMatch != nil: + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch) + case h.PrefixMatch != nil && *h.PrefixMatch != "": + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch) + case h.SuffixMatch != nil && *h.SuffixMatch != "": + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch) + case h.RangeMatch != nil: + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) + case h.PresentMatch != nil: + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch) + default: + return nil, fmt.Errorf("illegal route: missing header_match_specifier") + } + if h.InvertMatch != nil && *h.InvertMatch { + matcherT = matcher.NewInvertMatcher(matcherT) + } + headerMatchers = append(headerMatchers, matcherT) + } + + var fractionMatcher *fractionMatcher + if r.Fraction != nil { + fractionMatcher = newFractionMatcher(*r.Fraction) + } + return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil +} + +// CompositeMatcher is a matcher that holds onto many matchers and aggregates +// the matching results. +type CompositeMatcher struct { + pm pathMatcher + hms []matcher.HeaderMatcher + fm *fractionMatcher +} + +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *CompositeMatcher { + return &CompositeMatcher{pm: pm, hms: hms, fm: fm} +} + +// Match returns true if all matchers return true. +func (a *CompositeMatcher) Match(info iresolver.RPCInfo) bool { + if a.pm != nil && !a.pm.match(info.Method) { + return false + } + + // Call headerMatchers even if md is nil, because routes may match + // non-presence of some headers. + var md metadata.MD + if info.Context != nil { + md, _ = metadata.FromOutgoingContext(info.Context) + if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { + md = metadata.Join(md, extraMD) + // Remove all binary headers. They are hard to match with. May need + // to add back if asked by users.
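+ // (Editor's note: metadata keys ending in "-bin" are gRPC's convention
+ // for binary-valued headers, transmitted base64-encoded, which is why
+ // they are unreliable to match as plain strings and are dropped below.)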
+ for k := range md { + if strings.HasSuffix(k, "-bin") { + delete(md, k) + } + } + } + } + for _, m := range a.hms { + if !m.Match(md) { + return false + } + } + + if a.fm != nil && !a.fm.match() { + return false + } + return true +} + +func (a *CompositeMatcher) String() string { + var ret string + if a.pm != nil { + ret += a.pm.String() + } + for _, m := range a.hms { + ret += m.String() + } + if a.fm != nil { + ret += a.fm.String() + } + return ret +} + +type fractionMatcher struct { + fraction int64 // real fraction is fraction/1,000,000. +} + +func newFractionMatcher(fraction uint32) *fractionMatcher { + return &fractionMatcher{fraction: int64(fraction)} +} + +// RandInt63n overrides grpcrand for deterministic control in tests. +var RandInt63n = grpcrand.Int63n + +func (fm *fractionMatcher) match() bool { + t := RandInt63n(1000000) + return t <= fm.fraction +} + +func (fm *fractionMatcher) String() string { + return fmt.Sprintf("fraction:%v", fm.fraction) +} + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. +func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b } + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} + +// FindBestMatchingVirtualHost returns the virtual host whose domains field best +// matches host. +// +// The domains field supports 4 different matching pattern types: +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*”) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if its matching pattern type is better +// - Exact match > suffix match > prefix match > universal match +// - If two matches are of the same pattern type, the longer match is better +// - This is to compare the length of the matching pattern, e.g. “*ABCDE” > +// “*ABC” +func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // TODO: Consider moving this helper to a better home. + var ( + matchVh *VirtualHost + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, host) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match.
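+ // (Editor's illustration of this precedence: for host "foo.bar.com",
+ // domain "foo.bar.com" is an exact match, "*.bar.com" a suffix match,
+ // "foo.*" a prefix match, and "*" a universal match; exact beats
+ // suffix beats prefix beats universal, and between two patterns of
+ // the same type the longer one wins, e.g. "*.bar.com" over "*.com".)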
+ continue + } + matchVh = vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches authority. +func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { + var ( + matchVh *VirtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for i, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = &vHosts[i] + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher_path.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher_path.go similarity index 97% rename from vendor/google.golang.org/grpc/xds/internal/resolver/matcher_path.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher_path.go index 011d1a94c4..a00c6954ef 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/matcher_path.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/matcher_path.go @@ -16,14 +16,14 @@ * */ -package resolver +package xdsclient import ( "regexp" "strings" ) -type pathMatcherInterface interface { +type pathMatcher interface { match(path string) bool String() string } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/requests_counter.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go similarity index 54% rename from vendor/google.golang.org/grpc/xds/internal/client/requests_counter.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go index f033e19209..beed2e9d0a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/requests_counter.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/requests_counter.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "fmt" @@ -24,44 +24,53 @@ import ( "sync/atomic" ) -type servicesRequestsCounter struct { +type clusterNameAndServiceName struct { + clusterName, edsServcieName string +} + +type clusterRequestsCounter struct { mu sync.Mutex - services map[string]*ServiceRequestsCounter + clusters map[clusterNameAndServiceName]*ClusterRequestsCounter } -var src = &servicesRequestsCounter{ - services: make(map[string]*ServiceRequestsCounter), +var src = &clusterRequestsCounter{ + clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter), } -// ServiceRequestsCounter is used to track the total inflight requests for a +// ClusterRequestsCounter is used to track the total inflight requests for a // service with the provided name. -type ServiceRequestsCounter struct { - ServiceName string - numRequests uint32 +type ClusterRequestsCounter struct { + ClusterName string + EDSServiceName string + numRequests uint32 } -// GetServiceRequestsCounter returns the ServiceRequestsCounter with the +// GetClusterRequestsCounter returns the ClusterRequestsCounter with the // provided serviceName. If one does not exist, it creates it.
-func GetServiceRequestsCounter(serviceName string) *ServiceRequestsCounter { +func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterRequestsCounter { src.mu.Lock() defer src.mu.Unlock() - c, ok := src.services[serviceName] + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServcieName: edsServiceName, + } + c, ok := src.clusters[k] if !ok { - c = &ServiceRequestsCounter{ServiceName: serviceName} - src.services[serviceName] = c + c = &ClusterRequestsCounter{ClusterName: clusterName} + src.clusters[k] = c } return c } -// StartRequest starts a request for a service, incrementing its number of +// StartRequest starts a request for a cluster, incrementing its number of // requests by 1. Returns an error if the max number of requests is exceeded. -func (c *ServiceRequestsCounter) StartRequest(max uint32) error { +func (c *ClusterRequestsCounter) StartRequest(max uint32) error { // Note that during race, the limits could be exceeded. This is allowed: // "Since the implementation is eventually consistent, races between threads // may allow limits to be potentially exceeded." // https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break. if atomic.LoadUint32(&c.numRequests) >= max { - return fmt.Errorf("max requests %v exceeded on service %v", max, c.ServiceName) + return fmt.Errorf("max requests %v exceeded on cluster %v", max, c.ClusterName) } atomic.AddUint32(&c.numRequests, 1) return nil @@ -69,18 +78,30 @@ func (c *ServiceRequestsCounter) StartRequest(max uint32) error { // EndRequest ends a request for a service, decrementing its number of requests // by 1. -func (c *ServiceRequestsCounter) EndRequest() { +func (c *ClusterRequestsCounter) EndRequest() { atomic.AddUint32(&c.numRequests, ^uint32(0)) } // ClearCounterForTesting clears the counter for the service. Should be only // used in tests. -func ClearCounterForTesting(serviceName string) { +func ClearCounterForTesting(clusterName, edsServiceName string) { src.mu.Lock() defer src.mu.Unlock() - c, ok := src.services[serviceName] + k := clusterNameAndServiceName{ + clusterName: clusterName, + edsServcieName: edsServiceName, + } + c, ok := src.clusters[k] if !ok { return } c.numRequests = 0 } + +// ClearAllCountersForTesting clears all the counters. Should only be used in +// tests. +func ClearAllCountersForTesting() { + src.mu.Lock() + defer src.mu.Unlock() + src.clusters = make(map[clusterNameAndServiceName]*ClusterRequestsCounter) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/client/singleton.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go similarity index 63% rename from vendor/google.golang.org/grpc/xds/internal/client/singleton.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go index 99f195341a..f045790e2a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/singleton.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go @@ -16,32 +16,30 @@ * */ -package client +package xdsclient import ( + "bytes" + "encoding/json" "fmt" "sync" "time" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const defaultWatchExpiryTimeout = 15 * time.Second // This is the Client returned by New(). It contains one client implementation, // and maintains the refcount. -var singletonClient = &Client{} +var singletonClient = &clientRefCounted{} // To override in tests.
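Before moving on to the singleton plumbing below, a usage sketch for the circuit-breaking counter above (editor's addition; the cluster and service names are illustrative, and maxRequests would typically come from ClusterUpdate.MaxRequests):

counter := GetClusterRequestsCounter("cluster_a", "eds_service_a")
if err := counter.StartRequest(maxRequests); err != nil {
	return err // over the circuit-breaking limit; fail the RPC locally
}
defer counter.EndRequest() // decrement once the RPC completes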
var bootstrapNewConfig = bootstrap.NewConfig -// Client is a full fledged gRPC client which queries a set of discovery APIs -// (collectively termed as xDS) on a remote management server, to discover -// various dynamic resources. -// -// The xds client is a singleton. It will be shared by the xds resolver and +// clientRefCounted is ref-counted, and is shared by the xds resolver and // balancer implementations, across multiple ClientConns and Servers. -type Client struct { +type clientRefCounted struct { *clientImpl // This mu protects all the fields, including the embedded clientImpl above. @@ -58,7 +56,18 @@ type Client struct { // Note that the first invocation of New() or NewWithConfig() sets the client // singleton. The following calls will return the singleton xds client without // checking or using the config. -func New() (*Client, error) { +func New() (XDSClient, error) { + // This cannot just return newRefCounted(), because in error cases, the + // returned nil is a typed nil (*clientRefCounted), which may cause nil + // checks to fail. + c, err := newRefCounted() + if err != nil { + return nil, err + } + return c, nil +} + +func newRefCounted() (*clientRefCounted, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() // If the client implementation was created, increment ref count and return @@ -92,9 +101,9 @@ func New() (*Client, error) { // singleton. The following calls will return the singleton xds client without // checking or using the config. // -// This function is internal only, for c2p resolver to use. DO NOT use this -// elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (*Client, error) { +// This function is internal only, for c2p resolver and testing to use. DO NOT +// use this elsewhere. Use New() instead. +func NewWithConfig(config *bootstrap.Config) (XDSClient, error) { singletonClient.mu.Lock() defer singletonClient.mu.Unlock() // If the client implementation was created, increment ref count and return @@ -118,7 +127,7 @@ func NewWithConfig(config *bootstrap.Config) (*Client, error) { // Close closes the client. It decrements the ref count of the xds client // implementation, and closes the gRPC connection to the management server when // the ref count reaches 0. -func (c *Client) Close() { +func (c *clientRefCounted) Close() { c.mu.Lock() defer c.mu.Unlock() c.refCount-- @@ -134,10 +143,56 @@ // // Note that this function doesn't set the singleton, so that the testing states // don't leak. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*Client, error) { +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (XDSClient, error) { cl, err := newWithConfig(config, watchExpiryTimeout) if err != nil { return nil, err } - return &Client{clientImpl: cl, refCount: 1}, nil + return &clientRefCounted{clientImpl: cl, refCount: 1}, nil } + +// NewClientWithBootstrapContents returns an xds client for this config, +// separate from the global singleton. This should be used for testing +// purposes only.
+func NewClientWithBootstrapContents(contents []byte) (XDSClient, error) { + // Normalize the contents + buf := bytes.Buffer{} + err := json.Indent(&buf, contents, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + } + contents = bytes.TrimSpace(buf.Bytes()) + + clientsMu.Lock() + defer clientsMu.Unlock() + if c := clients[string(contents)]; c != nil { + c.mu.Lock() + // Since we don't remove the *Client from the map when it is closed, we + // need to recreate the impl if the ref count dropped to zero. + if c.refCount > 0 { + c.refCount++ + c.mu.Unlock() + return c, nil + } + c.mu.Unlock() + } + + bcfg, err := bootstrap.NewConfigFromContents(contents) + if err != nil { + return nil, fmt.Errorf("xds: error with bootstrap config: %v", err) + } + + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout) + if err != nil { + return nil, err + } + + c := &clientRefCounted{clientImpl: cImpl, refCount: 1} + clients[string(contents)] = c + return c, nil +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/vendor/google.golang.org/grpc/xds/internal/client/transport_helper.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport_helper.go similarity index 98% rename from vendor/google.golang.org/grpc/xds/internal/client/transport_helper.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/transport_helper.go index 671e5b3220..4c56daaf01 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/transport_helper.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport_helper.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "context" @@ -24,7 +24,7 @@ import ( "time" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc" "google.golang.org/grpc/internal/buffer" @@ -342,11 +342,12 @@ func (t *TransportHelper) recv(stream grpc.ClientStream) bool { } } -func mapToSlice(m map[string]bool) (ret []string) { +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) for i := range m { ret = append(ret, i) } - return + return ret } type watchAction struct { diff --git a/vendor/google.golang.org/grpc/xds/internal/client/v2/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/client.go similarity index 96% rename from vendor/google.golang.org/grpc/xds/internal/client/v2/client.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/client.go index b6bc490812..766db2564b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/v2/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/client.go @@ -27,8 +27,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" @@ -125,7 +126,7 @@ func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rTyp if err := stream.Send(req); err != nil { return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) } - v2c.logger.Debugf("ADS request sent: %v", req) + v2c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) return nil } @@ -139,11 +140,11 
@@ func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - // TODO: call watch callbacks with error when stream is broken. + v2c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v2c.logger.Debugf("ADS response received: %v", resp) + v2c.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp)) return resp, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/v2/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/loadreport.go similarity index 88% rename from vendor/google.golang.org/grpc/xds/internal/client/v2/loadreport.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/loadreport.go index 69405fcd9a..f0034e21c3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/v2/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v2/loadreport.go @@ -26,7 +26,8 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/load" v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" v2endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" @@ -57,7 +58,7 @@ func (v2c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) req := &lrspb.LoadStatsRequest{Node: node} - v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) + v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) return stream.Send(req) } @@ -71,7 +72,7 @@ func (v2c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time. 
if err != nil { return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) } - v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) + v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -98,24 +99,22 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) } - var clusterStats []*v2endpointpb.ClusterStats + clusterStats := make([]*v2endpointpb.ClusterStats, 0, len(loads)) for _, sd := range loads { - var ( - droppedReqs []*v2endpointpb.ClusterStats_DroppedRequests - localityStats []*v2endpointpb.UpstreamLocalityStats - ) + droppedReqs := make([]*v2endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) for category, count := range sd.Drops { droppedReqs = append(droppedReqs, &v2endpointpb.ClusterStats_DroppedRequests{ Category: category, DroppedCount: count, }) } + localityStats := make([]*v2endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) for l, localityData := range sd.LocalityStats { lid, err := internal.LocalityIDFromString(l) if err != nil { return err } - var loadMetricStats []*v2endpointpb.EndpointLoadMetricStats + loadMetricStats := make([]*v2endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) for name, loadData := range localityData.LoadStats { loadMetricStats = append(loadMetricStats, &v2endpointpb.EndpointLoadMetricStats{ MetricName: name, @@ -149,6 +148,6 @@ func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) } req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v2c.logger.Infof("lrs: sending LRS loads: %+v", req) + v2c.logger.Infof("lrs: sending LRS loads: %+v", pretty.ToJSON(req)) return stream.Send(req) } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/v3/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/client.go similarity index 96% rename from vendor/google.golang.org/grpc/xds/internal/client/v3/client.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/client.go index 55cae56d8c..6088189f97 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/v3/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/client.go @@ -28,8 +28,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" @@ -125,7 +126,7 @@ func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rTyp if err := stream.Send(req); err != nil { return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) } - v3c.logger.Debugf("ADS request sent: %v", req) + v3c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req)) return nil } @@ -139,11 +140,11 @@ func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { resp, err := stream.Recv() if err != nil { - // TODO: call watch callbacks with error when stream is broken. 
+ v3c.parent.NewConnectionError(err) return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) } v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v3c.logger.Debugf("ADS response received: %+v", resp) + v3c.logger.Debugf("ADS response received: %+v", pretty.ToJSON(resp)) return resp, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/v3/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/loadreport.go similarity index 88% rename from vendor/google.golang.org/grpc/xds/internal/client/v3/loadreport.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/loadreport.go index 74e18632aa..8cdb5476fb 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/v3/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/v3/loadreport.go @@ -26,7 +26,8 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/load" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" @@ -57,7 +58,7 @@ func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) req := &lrspb.LoadStatsRequest{Node: node} - v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) + v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", pretty.ToJSON(req)) return stream.Send(req) } @@ -71,7 +72,7 @@ func (v3c *client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time. if err != nil { return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) } - v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) + v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", pretty.ToJSON(resp)) interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) if err != nil { @@ -98,24 +99,22 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) } - var clusterStats []*v3endpointpb.ClusterStats + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) for _, sd := range loads { - var ( - droppedReqs []*v3endpointpb.ClusterStats_DroppedRequests - localityStats []*v3endpointpb.UpstreamLocalityStats - ) + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) for category, count := range sd.Drops { droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ Category: category, DroppedCount: count, }) } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) for l, localityData := range sd.LocalityStats { lid, err := internal.LocalityIDFromString(l) if err != nil { return err } - var loadMetricStats []*v3endpointpb.EndpointLoadMetricStats + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) for name, loadData := range localityData.LoadStats { loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ MetricName: name, @@ -148,6 +147,6 @@ func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) } req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v3c.logger.Infof("lrs: sending LRS loads: %+v", req) + v3c.logger.Infof("lrs: sending LRS loads: %+v", 
pretty.ToJSON(req)) return stream.Send(req) } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/watchers.go similarity index 95% rename from vendor/google.golang.org/grpc/xds/internal/client/watchers.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/watchers.go index 9fafe5a60f..e26ed36030 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/watchers.go @@ -16,12 +16,14 @@ * */ -package client +package xdsclient import ( "fmt" "sync" "time" + + "google.golang.org/grpc/internal/pretty" ) type watchInfoState int @@ -64,6 +66,17 @@ func (wi *watchInfo) newUpdate(update interface{}) { wi.c.scheduleCallback(wi, update, nil) } +func (wi *watchInfo) newError(err error) { + wi.mu.Lock() + defer wi.mu.Unlock() + if wi.state == watchInfoStateCanceled { + return + } + wi.state = watchInfoStateRespReceived + wi.expiryTimer.Stop() + wi.sendErrorLocked(err) +} + func (wi *watchInfo) resourceNotFound() { wi.mu.Lock() defer wi.mu.Unlock() @@ -161,22 +174,22 @@ func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { switch wi.rType { case ListenerResource: if v, ok := c.ldsCache[resourceName]; ok { - c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case RouteConfigResource: if v, ok := c.rdsCache[resourceName]; ok { - c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case ClusterResource: if v, ok := c.cdsCache[resourceName]; ok { - c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } case EndpointsResource: if v, ok := c.edsCache[resourceName]; ok { - c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, v) + c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, pretty.ToJSON(v)) wi.newUpdate(v) } } diff --git a/vendor/google.golang.org/grpc/xds/internal/client/xds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xds.go similarity index 55% rename from vendor/google.golang.org/grpc/xds/internal/client/xds.go rename to vendor/google.golang.org/grpc/xds/internal/xdsclient/xds.go index 0cd373d569..686c52a350 100644 --- a/vendor/google.golang.org/grpc/xds/internal/client/xds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xds.go @@ -16,7 +16,7 @@ * */ -package client +package xdsclient import ( "errors" @@ -39,10 +39,12 @@ import ( v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/httpfilter" @@ -56,8 +58,8 @@ const transportSocketName = "envoy.transport_sockets.tls" // UnmarshalListener processes resources received in an LDS response, validates // them, and transforms them into a native struct which 
contains only fields we // are interested in. -func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdate, UpdateMetadata, error) { - update := make(map[string]ListenerUpdate) +func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ListenerUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -72,7 +74,7 @@ func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (stri if err := proto.Unmarshal(r.GetValue(), lis); err != nil { return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis) + logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, pretty.ToJSON(lis)) lu, err := processListener(lis, logger, v2) if err != nil { @@ -177,7 +179,7 @@ func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Fi } func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { - if !env.FaultInjectionSupport || len(cfgs) == 0 { + if len(cfgs) == 0 { return nil, nil } m := make(map[string]httpfilter.FilterConfig) @@ -206,10 +208,6 @@ func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilt } func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { - if !env.FaultInjectionSupport { - return nil, nil - } - ret := make([]HTTPFilter, 0, len(filters)) seenNames := make(map[string]bool, len(filters)) for _, filter := range filters { @@ -247,6 +245,20 @@ func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilt // Save name/config ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." 
- A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } return ret, nil } @@ -271,13 +283,6 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err Port: strconv.Itoa(int(sockAddr.GetPortValue())), }, } - chains := lis.GetFilterChains() - if def := lis.GetDefaultFilterChain(); def != nil { - chains = append(chains, def) - } - if err := validateNetworkFilterChains(chains); err != nil { - return nil, err - } fcMgr, err := NewFilterChainManager(lis) if err != nil { @@ -287,67 +292,12 @@ func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, err return lu, nil } -func validateNetworkFilterChains(filterChains []*v3listenerpb.FilterChain) error { - for _, filterChain := range filterChains { - seenNames := make(map[string]bool, len(filterChain.GetFilters())) - seenHCM := false - for _, filter := range filterChain.GetFilters() { - name := filter.GetName() - if name == "" { - return fmt.Errorf("filter chain {%+v} is missing name field in filter: {%+v}", filterChain, filter) - } - if seenNames[name] { - return fmt.Errorf("filter chain {%+v} has duplicate filter name %q", filterChain, name) - } - seenNames[name] = true - - // Network filters have a oneof field named `config_type` where we - // only support `TypedConfig` variant. - switch typ := filter.GetConfigType().(type) { - case *v3listenerpb.Filter_TypedConfig: - // The typed_config field has an `anypb.Any` proto which could - // directly contain the serialized bytes of the actual filter - // configuration, or it could be encoded as a `TypedStruct`. - // TODO: Add support for `TypedStruct`. - tc := filter.GetTypedConfig() - - // The only network filter that we currently support is the v3 - // HttpConnectionManager. So, we can directly check the type_url - // and unmarshal the config. - // TODO: Implement a registry of supported network filters (like - // we have for HTTP filters), when we have to support network - // filters other than HttpConnectionManager. - if tc.GetTypeUrl() != version.V3HTTPConnManagerURL { - return fmt.Errorf("filter chain {%+v} has unsupported network filter %q in filter {%+v}", filterChain, tc.GetTypeUrl(), filter) - } - hcm := &v3httppb.HttpConnectionManager{} - if err := ptypes.UnmarshalAny(tc, hcm); err != nil { - return fmt.Errorf("filter chain {%+v} failed unmarshaling of network filter {%+v}: %v", filterChain, filter, err) - } - // We currently don't support HTTP filters on the server-side. - // We will be adding support for it in the future. So, we want - // to make sure that the http_filters configuration is valid. - if _, err := processHTTPFilters(hcm.GetHttpFilters(), true); err != nil { - return err - } - seenHCM = true - default: - return fmt.Errorf("filter chain {%+v} has unsupported config_type %T in filter %s", filterChain, typ, filter.GetName()) - } - } - if !seenHCM { - return fmt.Errorf("filter chain {%+v} missing HttpConnectionManager filter", filterChain) - } - } - return nil -} - // UnmarshalRouteConfig processes resources received in an RDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in. 
The provided hostname determines the route // configuration resources of interest. -func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdate, UpdateMetadata, error) { - update := make(map[string]RouteConfigUpdate) +func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]RouteConfigUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -360,7 +310,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s if err := proto.Unmarshal(r.GetValue(), rc); err != nil { return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, rc) + logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, pretty.ToJSON(rc)) // TODO: Pass version.TransportAPI instead of relying upon the type URL v2 := r.GetTypeUrl() == version.V2RouteConfigURL @@ -380,7 +330,7 @@ func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (s // VirtualHost whose domain field matches the server name from the URI passed // to the gRPC channel, and it contains a clusterName or a weighted cluster. // -// The RouteConfiguration includes a list of VirtualHosts, which may have zero +// The RouteConfiguration includes a list of virtualHosts, which may have zero // or more elements. We are interested in the element whose domains field // matches the server name specified in the "xds:" URI. The only field in the // VirtualHost proto that we are interested in is the list of routes. We @@ -389,15 +339,20 @@ // message, the cluster field will contain the clusterName or weighted clusters // we are looking for.
func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { - var vhs []*VirtualHost + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) for _, vh := range rc.GetVirtualHosts() { routes, err := routesProtoToSlice(vh.Routes, logger, v2) if err != nil { return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } vhOut := &VirtualHost{ - Domains: vh.GetDomains(), - Routes: routes, + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, } if !v2 { cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) @@ -411,9 +366,62 @@ func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, l return RouteConfigUpdate{VirtualHosts: vhs}, nil } +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if !env.RetrySupport || rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { var routesRet []*Route - for _, r := range routes { match := r.GetMatch() if match == nil { @@ -497,49 +505,82 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, route.Fraction = &n } - route.WeightedClusters = make(map[string]WeightedCluster) - action := r.GetRoute() - switch a := action.GetClusterSpecifier().(type) { - case *v3routepb.RouteAction_Cluster: - route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} - case *v3routepb.RouteAction_WeightedClusters: - wcs := a.WeightedClusters - var totalWeight uint32 - for _, c := range wcs.Clusters { - w := c.GetWeight().GetValue() - if w == 0 { - continue + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. 
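An aside on generateRetryConfig above, before the hash-policy handling that follows: the helper maps Envoy's comma-separated retry_on string onto gRPC status codes, defaults num_retries to 1, and defaults the backoff to a 25ms base interval with max_interval at 10x the base. A minimal sketch (not part of the patch) of a RetryPolicy the helper would accept, assuming the public go-control-plane protos:

package main

import (
    "fmt"
    "time"

    v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
    "google.golang.org/protobuf/types/known/durationpb"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    rp := &v3routepb.RetryPolicy{
        // Only "cancelled", "deadline-exceeded", "internal",
        // "resource-exhausted" and "unavailable" are honored; an
        // unsupported token like "refused-stream" is silently skipped.
        RetryOn:    "cancelled, unavailable, refused-stream",
        NumRetries: &wrapperspb.UInt32Value{Value: 3},
        RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{
            BaseInterval: durationpb.New(50 * time.Millisecond),
            // MaxInterval left unset: defaults to 10 * base = 500ms.
        },
    }
    fmt.Println(rp.GetRetryOn(), rp.GetNumRetries().GetValue())
}

Note that if none of the retry_on tokens are supported, the helper returns an empty RetryConfig rather than an error, which effectively disables retries for that virtual host or route.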
+ if env.RingHashSupport { + hp, err := hashPoliciesProtoToSlice(action.HashPolicy, logger) + if err != nil { + return nil, err } - wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + route.HashPolicies = hp + } + + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint32 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + wc := WeightedCluster{Weight: w} + if !v2 { + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs } - wc.HTTPFilterConfigOverride = cfgs + route.WeightedClusters[c.GetName()] = wc + totalWeight += w } - route.WeightedClusters[c.GetName()] = wc - totalWeight += w + // The default total weight is 100, per the Envoy xDS docs: + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto.html#envoy-v3-api-field-config-route-v3-weightedcluster-total-weight + wantTotalWeight := uint32(100) + if tw := wcs.GetTotalWeight(); tw != nil { + wantTotalWeight = tw.GetValue() + } + if totalWeight != wantTotalWeight { + return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total weight, got: %v, expected total weight from response: %v", r, a, totalWeight, wantTotalWeight) + } + if totalWeight == 0 { + return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + } + case *v3routepb.RouteAction_ClusterHeader: + continue } - if totalWeight != wcs.GetTotalWeight().GetValue() { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, want %v", r, a, wcs.GetTotalWeight().GetValue(), totalWeight) + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. + dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() } - if totalWeight == 0 { - return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d } - case *v3routepb.RouteAction_ClusterHeader: - continue - } - msd := action.GetMaxStreamDuration() - // Prefer grpc_timeout_header_max, if set. - dur := msd.GetGrpcTimeoutHeaderMax() - if dur == nil { - dur = msd.GetMaxStreamDuration() - } - if dur != nil { - d := dur.AsDuration() - route.MaxStreamDuration = &d + var err error + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + + route.RouteAction = RouteActionRoute + + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side.
+ route.RouteAction = RouteActionNonForwardingAction + default: + route.RouteAction = RouteActionUnsupported } if !v2 { @@ -554,11 +595,44 @@ func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, return routesRet, nil } +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy, logger *grpclog.PrefixLogger) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Infof("hash policy %+v contains an invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Infof("hash policy %T is an unsupported hash policy", p.GetPolicySpecifier()) + continue + } + + hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} + // UnmarshalCluster processes resources received in a CDS response, validates // them, and transforms them into a native struct which contains only fields we // are interested in. -func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdate, UpdateMetadata, error) { - update := make(map[string]ClusterUpdate) +func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]ClusterUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -572,7 +646,7 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster) + logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, pretty.ToJSON(cluster)) cu, err := validateClusterAndConstructClusterUpdate(cluster) if err != nil { return cluster.GetName(), ClusterUpdate{}, err @@ -582,71 +656,147 @@ func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (strin return cluster.GetName(), cu, nil } -func clusterTypeFromCluster(cluster *v3clusterpb.Cluster) (ClusterType, string, []string, error) { - if cluster.GetType() == v3clusterpb.Cluster_EDS { - if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { - return 0, "", nil, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { + var lbPolicy *ClusterLBPolicyRingHash + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: +
lbPolicy = nil // The default is round_robin, and there's no config to set. + case v3clusterpb.Cluster_RING_HASH: + if !env.RingHashSupport { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - // If the Cluster message in the CDS response did not contain a - // serviceName, we will just use the clusterName for EDS. - if cluster.GetEdsClusterConfig().GetServiceName() == "" { - return ClusterTypeEDS, cluster.GetName(), nil, nil + rhc := cluster.GetRingHashLbConfig() + if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { + return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) } - return ClusterTypeEDS, cluster.GetEdsClusterConfig().GetServiceName(), nil, nil + // Minimum defaults to 1024 entries, and is limited to 8M entries. Maximum + // defaults to 8M entries, and is limited to 8M entries. + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhc.GetMinimumRingSize(); min != nil { + if min.GetValue() > ringHashSizeUpperBound { + return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash minimum ring size %v in response: %+v", min.GetValue(), cluster) + } + minSize = min.GetValue() + } + if max := rhc.GetMaximumRingSize(); max != nil { + if max.GetValue() > ringHashSizeUpperBound { + return ClusterUpdate{}, fmt.Errorf("unexpected ring_hash maximum ring size %v in response: %+v", max.GetValue(), cluster) + } + maxSize = max.GetValue() + } + if minSize > maxSize { + return ClusterUpdate{}, fmt.Errorf("ring_hash config min size %v is greater than max %v", minSize, maxSize) + } + lbPolicy = &ClusterLBPolicyRingHash{MinimumRingSize: minSize, MaximumRingSize: maxSize} + default: + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } - if !env.AggregateAndDNSSupportEnv { - return 0, "", nil, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + // Process security configuration received from the control plane iff the + // corresponding environment variable is set. + var sc *SecurityConfig + if env.ClientSideSecuritySupport { + var err error + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } } - if cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS { - return ClusterTypeLogicalDNS, cluster.GetName(), nil, nil + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + EnableLRS: cluster.GetLrsServer().GetSelf() != nil, + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, } - if cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate" { - // Loop through ClusterConfig here to get cluster names. + // Validate and set cluster type from the response.
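An aside on the ring-hash bounds above, before the cluster-type validation continues in the switch below: both ring sizes are capped at 8M entries, and the minimum must not exceed the maximum. A sketch (not part of the patch) of a RING_HASH cluster that passes this validation, assuming the public go-control-plane protos:

package main

import (
    "fmt"

    v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    c := &v3clusterpb.Cluster{
        Name:     "backend",
        LbPolicy: v3clusterpb.Cluster_RING_HASH,
        LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{
            RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{
                // XX_HASH is the only hash function accepted above.
                HashFunction:    v3clusterpb.Cluster_RingHashLbConfig_XX_HASH,
                MinimumRingSize: &wrapperspb.UInt64Value{Value: 2048},
                MaximumRingSize: &wrapperspb.UInt64Value{Value: 4096},
            },
        },
    }
    fmt.Println(c.GetRingHashLbConfig().GetMinimumRingSize().GetValue())
}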
+ switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil { + return ClusterUpdate{}, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + if !env.AggregateAndDNSSupportEnv { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } + ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + if !env.AggregateAndDNSSupportEnv { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } clusters := &v3aggregateclusterpb.ClusterConfig{} if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { - return 0, "", nil, fmt.Errorf("failed to unmarshal resource: %v", err) + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - return ClusterTypeAggregate, cluster.GetName(), clusters.Clusters, nil + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) } - return 0, "", nil, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) } -func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} - if cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN { - return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. +func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") } - clusterType, serviceName, prioritizedClusters, err := clusterTypeFromCluster(cluster) - if err != nil { - return emptyUpdate, err + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) } - - // Process security configuration received from the control plane iff the - // corresponding environment variable is set. 
- var sc *SecurityConfig - if env.ClientSideSecuritySupport { - var err error - if sc, err = securityConfigFromCluster(cluster); err != nil { - return emptyUpdate, err - } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) } - - return ClusterUpdate{ - ClusterType: clusterType, - ServiceName: serviceName, - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - PrioritizedClusterNames: prioritizedClusters, - }, nil + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil } // securityConfigFromCluster extracts the relevant security configuration from // the received Cluster resource. func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { + if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) + } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. The any proto // contains a marshaled representation of an `UpstreamTlsContext` message. @@ -665,22 +815,55 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) } + // The following fields from `UpstreamTlsContext` are ignored: + // - sni + // - allow_renegotiation + // - max_session_keys if upstreamCtx.GetCommonTlsContext() == nil { return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") } - sc, err := securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext()) - if err != nil { - return nil, err + return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) +} + +// common is expected to be not nil. +// The `alpn_protocols` field is ignored. +func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) + } + + // For now, if we can't get a valid security config from the new fields, we + // fall back to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here.
+ sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) + if sc == nil || sc.Equal(&SecurityConfig{}) { + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) + if err != nil { + return nil, err + } } - if sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } } return sc, nil } -// common is expected to be not nil. -func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { // The `CommonTlsContext` contains a // `tls_certificate_certificate_provider_instance` field of type // `CertificateProviderInstance`, which contains the provider instance name @@ -703,16 +886,19 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*Secu switch t := common.GetValidationContextType().(type) { case *v3tlspb.CommonTlsContext_CombinedValidationContext: combined := common.GetCombinedValidationContext() - var matchers []xds.StringMatcher + var matchers []matcher.StringMatcher if def := combined.GetDefaultValidationContext(); def != nil { for _, m := range def.GetMatchSubjectAltNames() { - matcher, err := xds.StringMatcherFromProto(m) + matcher, err := matcher.StringMatcherFromProto(m) if err != nil { return nil, err } matchers = append(matchers, matcher) } } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } sc.SubjectAltNameMatchers = matchers if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { sc.RootInstanceName = pi.GetInstanceName() @@ -730,13 +916,114 @@ func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*Secu return sc, nil } +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. 
+ sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type `CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+ if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + // circuitBreakersFromCluster extracts the circuit breakers configuration from // the received cluster resource. Returns nil if no CircuitBreakers or no // Thresholds in CircuitBreakers. func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { - if !env.CircuitBreakingSupport { - return nil - } for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { continue @@ -754,8 +1041,8 @@ func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { // UnmarshalEndpoints processes resources received in an EDS response, // validates them, and transforms them into a native struct which contains only // fields we are interested in.
-func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdate, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdate) +func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdateErrTuple, UpdateMetadata, error) { + update := make(map[string]EndpointsUpdateErrTuple) md, err := processAllResources(version, resources, logger, update) return update, md, err } @@ -769,7 +1056,7 @@ func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (str if err := proto.Unmarshal(r.GetValue(), cla); err != nil { return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla) + logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, pretty.ToJSON(cla)) u, err := parseEDSRespProto(cla) if err != nil { @@ -849,9 +1136,45 @@ func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, return ret, nil } +// ListenerUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ListenerUpdateErrTuple struct { + Update ListenerUpdate + Err error +} + +// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains +// the results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type RouteConfigUpdateErrTuple struct { + Update RouteConfigUpdate + Err error +} + +// ClusterUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ClusterUpdateErrTuple struct { + Update ClusterUpdate + Err error +} + +// EndpointsUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type EndpointsUpdateErrTuple struct { + Update EndpointsUpdate + Err error +} + // processAllResources unmarshals and validates the resources, populates the // provided ret (a map), and returns metadata and error. // +// After this function, the ret map will be populated with both valid and +// invalid updates. Invalid resources will have an entry with the key as the +// resource name, value as an empty update. +// // The type of the resource is determined by the type of ret. E.g. // map[string]ListenerUpdate means this is for LDS. 
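Before the dispatch loop in processAllResources below, a sketch (not part of the patch) of how a caller might consume these tuple maps, distinguishing NACKed resources from valid updates. The types are redeclared locally here because the real ones live in the internal xdsclient package:

package main

import "fmt"

type ClusterUpdate struct{ ClusterName string }

type ClusterUpdateErrTuple struct {
    Update ClusterUpdate
    Err    error
}

func main() {
    updates := map[string]ClusterUpdateErrTuple{
        "good-cluster": {Update: ClusterUpdate{ClusterName: "good-cluster"}},
        "bad-cluster":  {Err: fmt.Errorf("unsupported cluster type")},
    }
    for name, tup := range updates {
        if tup.Err != nil {
            // The resource was present in the response but failed validation.
            fmt.Printf("resource %q rejected: %v\n", name, tup.Err)
            continue
        }
        fmt.Printf("resource %q accepted: %+v\n", name, tup.Update)
    }
}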
func processAllResources(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger, ret interface{}) (UpdateMetadata, error) { @@ -865,10 +1188,10 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog for _, r := range resources { switch ret2 := ret.(type) { - case map[string]ListenerUpdate: + case map[string]ListenerUpdateErrTuple: name, update, err := unmarshalListenerResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = ListenerUpdateErrTuple{Update: update} continue } if name == "" { @@ -878,11 +1201,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. - ret2[name] = ListenerUpdate{} - case map[string]RouteConfigUpdate: + ret2[name] = ListenerUpdateErrTuple{Err: err} + case map[string]RouteConfigUpdateErrTuple: name, update, err := unmarshalRouteConfigResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = RouteConfigUpdateErrTuple{Update: update} continue } if name == "" { @@ -892,11 +1215,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. - ret2[name] = RouteConfigUpdate{} - case map[string]ClusterUpdate: + ret2[name] = RouteConfigUpdateErrTuple{Err: err} + case map[string]ClusterUpdateErrTuple: name, update, err := unmarshalClusterResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = ClusterUpdateErrTuple{Update: update} continue } if name == "" { @@ -906,11 +1229,11 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. - ret2[name] = ClusterUpdate{} - case map[string]EndpointsUpdate: + ret2[name] = ClusterUpdateErrTuple{Err: err} + case map[string]EndpointsUpdateErrTuple: name, update, err := unmarshalEndpointsResource(r, logger) if err == nil { - ret2[name] = update + ret2[name] = EndpointsUpdateErrTuple{Update: update} continue } if name == "" { @@ -920,7 +1243,7 @@ func processAllResources(version string, resources []*anypb.Any, logger *grpclog perResourceErrors[name] = err // Add place holder in the map so we know this resource name was in // the response. 
- ret2[name] = EndpointsUpdate{} + ret2[name] = EndpointsUpdateErrTuple{Err: err} } } diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go index 3a2b629ae9..b36fa64b50 100644 --- a/vendor/google.golang.org/grpc/xds/server.go +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -27,25 +27,31 @@ import ( "sync" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/buffer" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/xds/internal/xdsclient" ) const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsclient.XDSClient, error) { return xdsclient.New() } - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) } @@ -58,21 +64,14 @@ func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) } -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. -type xdsClientInterface interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - -// grpcServerInterface contains methods from grpc.Server which are used by the +// grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. -type grpcServerInterface interface { +type grpcServer interface { RegisterService(*grpc.ServiceDesc, interface{}) Serve(net.Listener) error Stop() GracefulStop() + GetServiceInfo() map[string]grpc.ServiceInfo } // GRPCServer wraps a gRPC server and provides server-side xDS functionality, by @@ -80,7 +79,7 @@ type grpcServerInterface interface { // grpc.ServiceRegistrar interface and can be passed to service registration // functions in IDL generated code. type GRPCServer struct { - gs grpcServerInterface + gs grpcServer quit *grpcsync.Event logger *internalgrpclog.PrefixLogger xdsCredsInUse bool @@ -90,7 +89,7 @@ type GRPCServer struct { // beginning of Serve(), where we have to decide if we have to create a // client or use an existing one. clientMu sync.Mutex - xdsC xdsClientInterface + xdsC xdsclient.XDSClient } // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. 
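The unexported grpcServer interface above exists so unit tests can substitute a fake via the newGRPCServer override. A minimal sketch (not part of the patch) of such a fake; the fakeGRPCServer name is an assumption made for this example:

package main

import (
    "fmt"
    "net"

    "google.golang.org/grpc"
)

// fakeGRPCServer records registered services and stubs out the rest of the
// grpcServer interface.
type fakeGRPCServer struct {
    registered []string
}

func (f *fakeGRPCServer) RegisterService(sd *grpc.ServiceDesc, _ interface{}) {
    f.registered = append(f.registered, sd.ServiceName)
}
func (f *fakeGRPCServer) Serve(net.Listener) error { return nil }
func (f *fakeGRPCServer) Stop()                    {}
func (f *fakeGRPCServer) GracefulStop()            {}
func (f *fakeGRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo {
    return map[string]grpc.ServiceInfo{}
}

func main() {
    f := &fakeGRPCServer{}
    f.RegisterService(&grpc.ServiceDesc{ServiceName: "helloworld.Greeter"}, nil)
    fmt.Println(f.registered) // [helloworld.Greeter]
}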
@@ -131,8 +130,8 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { func handleServerOptions(opts []grpc.ServerOption) *serverOptions { so := &serverOptions{} for _, opt := range opts { - if o, ok := opt.(serverOption); ok { - o.applyServerOption(so) + if o, ok := opt.(*serverOption); ok { + o.apply(so) } } return so @@ -145,6 +144,12 @@ func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss interface{}) { s.gs.RegisterService(sd, ss) } +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of <package>.<service>. +func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + return s.gs.GetServiceInfo() +} + // initXDSClient creates a new xdsClient if there is no existing one available. func (s *GRPCServer) initXDSClient() error { s.clientMu.Lock() @@ -154,6 +159,12 @@ func (s *GRPCServer) initXDSClient() error { return nil } + newXDSClient := newXDSClient + if s.opts.bootstrapContents != nil { + newXDSClient = func() (xdsclient.XDSClient, error) { + return xdsclient.NewClientWithBootstrapContents(s.opts.bootstrapContents) + } + } client, err := newXDSClient() if err != nil { return fmt.Errorf("xds: failed to create xds-client: %v", err) } @@ -181,7 +192,6 @@ func (s *GRPCServer) Serve(lis net.Listener) error { if err := s.initXDSClient(); err != nil { return err } - cfg := s.xdsC.BootstrapConfig() if cfg == nil { return errors.New("bootstrap configuration is empty") @@ -223,13 +233,18 @@ func (s *GRPCServer) Serve(lis net.Listener) error { ListenerResourceName: name, XDSCredsInUse: s.xdsCredsInUse, XDSClient: s.xdsC, - ModeCallback: func(addr net.Addr, mode server.ServingMode, err error) { + ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) { modeUpdateCh.Put(&modeChangeArgs{ addr: addr, mode: mode, err: err, }) }, + DrainCallback: func(addr net.Addr) { + if gs, ok := s.gs.(*grpc.Server); ok { + drainServerTransports(gs, addr.String()) + } + }, }) // Block until a good LDS response is received or the server is stopped. @@ -248,7 +263,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // modeChangeArgs wraps argument required for invoking mode change callback. type modeChangeArgs struct { addr net.Addr - mode server.ServingMode + mode connectivity.ServingMode err error } @@ -265,7 +280,7 @@ func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { case u := <-updateCh.Get(): updateCh.Load() args := u.(*modeChangeArgs) - if args.mode == ServingModeNotServing { + if args.mode == connectivity.ServingModeNotServing { // We type assert our underlying gRPC server to the real // grpc.Server here before trying to initiate the drain // operation. This approach avoids performing the same type @@ -309,18 +324,79 @@ func (s *GRPCServer) GracefulStop() { } } +// routeAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured.
+func routeAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(interface { + VirtualHosts() []xdsclient.VirtualHostWithInterceptors + }) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. + authority := md.Get(":authority") + vh := xdsclient.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) + if vh == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *xdsclient.RouteWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.Routes { + if r.M.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes + // RPCs matching that route to fail with UNAVAILABLE." - A36 + if r.RouteAction != xdsclient.RouteActionNonForwardingAction { + return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.Interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return status.Errorf(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. -// -// This is a no-op at this point. func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if env.RBACSupport { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } + } return handler(ctx, req) } // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. -// -// This is a no-op at this point. func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if env.RBACSupport { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } + } return handler(srv, ss) } diff --git a/vendor/google.golang.org/grpc/xds/server_options.go b/vendor/google.golang.org/grpc/xds/server_options.go index 44b7b374fd..1d46c3adb7 100644 --- a/vendor/google.golang.org/grpc/xds/server_options.go +++ b/vendor/google.golang.org/grpc/xds/server_options.go @@ -22,50 +22,25 @@ import ( "net" "google.golang.org/grpc" - iserver "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/connectivity" ) -// ServingModeCallback returns a grpc.ServerOption which allows users to -// register a callback to get notified about serving mode changes. 
-func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { - return &smcOption{cb: cb} -} - -type serverOption interface { - applyServerOption(*serverOptions) +type serverOptions struct { + modeCallback ServingModeCallbackFunc + bootstrapContents []byte } -// smcOption is a server option containing a callback to be invoked when the -// serving mode changes. -type smcOption struct { - // Embedding the empty server option makes it safe to pass it to - // grpc.NewServer(). +type serverOption struct { grpc.EmptyServerOption - cb ServingModeCallbackFunc + apply func(*serverOptions) } -func (s *smcOption) applyServerOption(o *serverOptions) { - o.modeCallback = s.cb -} - -type serverOptions struct { - modeCallback ServingModeCallbackFunc +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. +func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} } -// ServingMode indicates the current mode of operation of the server. -type ServingMode = iserver.ServingMode - -const ( - // ServingModeServing indicates the the server contains all required xDS - // configuration is serving RPCs. - ServingModeServing = iserver.ServingModeServing - // ServingModeNotServing indicates that the server is not accepting new - // connections. Existing connections will be closed gracefully, allowing - // in-progress RPCs to complete. A server enters this mode when it does not - // contain the required xDS configuration to serve RPCs. - ServingModeNotServing = iserver.ServingModeNotServing -) - // ServingModeCallbackFunc is the callback that users can register to get // notified about the server's serving mode changes. The callback is invoked // with the address of the listener and its new mode. @@ -77,8 +52,25 @@ type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) // function. type ServingModeChangeArgs struct { // Mode is the new serving mode of the server listener. - Mode ServingMode + Mode connectivity.ServingMode // Err is set to a non-nil error if the server has transitioned into // not-serving mode. Err error } + +// BootstrapContentsForTesting returns a grpc.ServerOption which allows users +// to inject a bootstrap configuration used by only this server, instead of the +// global configuration from the environment variables. +// +// Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContents = contents }} +} diff --git a/vendor/google.golang.org/grpc/xds/xds.go b/vendor/google.golang.org/grpc/xds/xds.go index 23c88903f4..ec16c9f520 100644 --- a/vendor/google.golang.org/grpc/xds/xds.go +++ b/vendor/google.golang.org/grpc/xds/xds.go @@ -25,11 +25,6 @@ // // See https://github.com/grpc/grpc-go/tree/master/examples/features/xds for // example. -// -// Experimental -// -// Notice: All APIs in this package are experimental and may be removed in a -// later release. 
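A usage sketch (not part of the patch) tying together the two server options defined above, ServingModeCallback and BootstrapContentsForTesting; the bootstrap JSON here is a placeholder:

package main

import (
    "log"
    "net"

    "google.golang.org/grpc/xds"
)

func main() {
    bootstrap := []byte(`{"xds_servers": []}`) // placeholder contents
    server := xds.NewGRPCServer(
        xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) {
            log.Printf("listener %v switched to mode %v (err: %v)", addr, args.Mode, args.Err)
        }),
        xds.BootstrapContentsForTesting(bootstrap),
    )
    defer server.Stop()
    // server.Serve(lis) would be called with a real net.Listener here.
}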
package xds import ( @@ -38,14 +33,15 @@ import ( v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + xdsresolver "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register the v2 xDS API client. + _ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register the v3 xDS API client. ) func init() { @@ -76,3 +72,21 @@ func init() { return csdss.Close, nil }) } + +// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using +// the provided xds bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return xdsresolver.NewBuilder(bootstrapConfig) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bbf66e55bc..0b1ffbe045 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -376,7 +376,6 @@ github.com/google/btree # github.com/google/go-cmp v0.5.6 ## explicit; go 1.8 github.com/google/go-cmp/cmp -github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function @@ -1011,8 +1010,8 @@ google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.40.0 => google.golang.org/grpc v1.38.0 -## explicit; go 1.11 +# google.golang.org/grpc v1.41.0 +## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1062,6 +1061,7 @@ google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/hierarchy google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1072,8 +1072,8 @@ google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/internal/wrr -google.golang.org/grpc/internal/xds google.golang.org/grpc/internal/xds/env +google.golang.org/grpc/internal/xds/matcher google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer 
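A usage sketch (not part of the patch) of the testing-only resolver builder defined above, wired into a channel via grpc.WithResolvers; the target and bootstrap JSON are placeholders:

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/xds"
)

func main() {
    resolverBuilder, err := xds.NewXDSResolverWithConfigForTesting([]byte(`{"xds_servers": []}`))
    if err != nil {
        log.Fatalf("failed to create xds resolver for testing: %v", err)
    }
    conn, err := grpc.Dial("xds:///my-service", grpc.WithInsecure(), grpc.WithResolvers(resolverBuilder))
    if err != nil {
        log.Fatalf("grpc.Dial() failed: %v", err)
    }
    defer conn.Close()
}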
@@ -1091,21 +1091,25 @@ google.golang.org/grpc/xds/internal google.golang.org/grpc/xds/internal/balancer google.golang.org/grpc/xds/internal/balancer/balancergroup google.golang.org/grpc/xds/internal/balancer/cdsbalancer +google.golang.org/grpc/xds/internal/balancer/clusterimpl google.golang.org/grpc/xds/internal/balancer/clustermanager -google.golang.org/grpc/xds/internal/balancer/edsbalancer +google.golang.org/grpc/xds/internal/balancer/clusterresolver +google.golang.org/grpc/xds/internal/balancer/loadstore +google.golang.org/grpc/xds/internal/balancer/priority +google.golang.org/grpc/xds/internal/balancer/ringhash google.golang.org/grpc/xds/internal/balancer/weightedtarget google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator -google.golang.org/grpc/xds/internal/client -google.golang.org/grpc/xds/internal/client/bootstrap -google.golang.org/grpc/xds/internal/client/load -google.golang.org/grpc/xds/internal/client/v2 -google.golang.org/grpc/xds/internal/client/v3 google.golang.org/grpc/xds/internal/httpfilter google.golang.org/grpc/xds/internal/httpfilter/fault google.golang.org/grpc/xds/internal/httpfilter/router google.golang.org/grpc/xds/internal/resolver google.golang.org/grpc/xds/internal/server google.golang.org/grpc/xds/internal/version +google.golang.org/grpc/xds/internal/xdsclient +google.golang.org/grpc/xds/internal/xdsclient/bootstrap +google.golang.org/grpc/xds/internal/xdsclient/load +google.golang.org/grpc/xds/internal/xdsclient/v2 +google.golang.org/grpc/xds/internal/xdsclient/v3 # google.golang.org/protobuf v1.27.1 ## explicit; go 1.9 google.golang.org/protobuf/encoding/protojson @@ -1172,4 +1176,3 @@ sigs.k8s.io/yaml # github.com/prometheus/prometheus => github.com/grafana/prometheus-private v0.0.0-20211210120034-fbe3167fda56 # github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 -# google.golang.org/grpc => google.golang.org/grpc v1.38.0