diff --git a/CHANGELOG.md b/CHANGELOG.md
index f2196371a0..158cd6dbb1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,7 @@
 * New exported metrics:
   * `cortex_bucket_store_series_hash_cache_requests_total`
   * `cortex_bucket_store_series_hash_cache_hits_total`
+* [FEATURE] PromQL: added `present_over_time` support. #139
 * [ENHANCEMENT] Include additional limits in the per-tenant override exporter. The following limits have been added to the `cortex_overrides` metric: #21
   * `max_fetched_series_per_query`
   * `max_fetched_chunk_bytes_per_query`
diff --git a/go.mod b/go.mod
index 0592ef03b6..b0560a63b4 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15
 	github.com/alicebob/miniredis/v2 v2.14.3
 	github.com/armon/go-metrics v0.3.6
-	github.com/aws/aws-sdk-go v1.38.60
+	github.com/aws/aws-sdk-go v1.40.10
 	github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
 	github.com/cespare/xxhash v1.1.0
 	github.com/dustin/go-humanize v1.0.0
@@ -26,11 +26,11 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/gogo/status v1.0.3
 	github.com/golang/protobuf v1.5.2
-	github.com/golang/snappy v0.0.3
-	github.com/gorilla/mux v1.7.3
+	github.com/golang/snappy v0.0.4
+	github.com/gorilla/mux v1.8.0
 	github.com/grafana/dskit v0.0.0-20210813141227-f25e16ec09b2
 	github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
-	github.com/hashicorp/consul/api v1.8.1
+	github.com/hashicorp/consul/api v1.9.1
 	github.com/hashicorp/go-cleanhttp v0.5.1
 	github.com/hashicorp/go-sockaddr v1.0.2
 	github.com/hashicorp/memberlist v0.2.3
@@ -47,11 +47,11 @@ require (
 	github.com/prometheus/alertmanager v0.22.3-0.20210628111558-8491f816296b
 	github.com/prometheus/client_golang v1.11.0
 	github.com/prometheus/client_model v0.2.0
-	github.com/prometheus/common v0.29.0
+	github.com/prometheus/common v0.30.0
 	github.com/prometheus/prometheus v1.8.2-0.20210720123808-b1ed4a0a663d
 	github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
 	github.com/sony/gobreaker v0.4.1
-	github.com/spf13/afero v1.2.2
+	github.com/spf13/afero v1.3.4
 	github.com/stretchr/testify v1.7.0
 	github.com/thanos-io/thanos v0.19.1-0.20210804003402-cd18b6155169
 	github.com/uber/jaeger-client-go v2.29.1+incompatible
@@ -62,12 +62,11 @@ require (
 	go.etcd.io/etcd/server/v3 v3.5.0-alpha.0.0.20210225194612-fa82d11a958a
 	go.uber.org/atomic v1.9.0
 	go.uber.org/goleak v1.1.10
-	golang.org/x/net v0.0.0-20210610132358-84b48f89b13b
+	golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6
-	golang.org/x/tools v0.1.5 // indirect
-	google.golang.org/api v0.48.0
-	google.golang.org/grpc v1.38.0
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
+	google.golang.org/api v0.51.0
+	google.golang.org/grpc v1.39.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 	sigs.k8s.io/yaml v1.2.0
@@ -87,9 +86,12 @@ replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915
 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab
 // Using a fork of Prometheus while we work on querysharding to avoid a dependency on the upstream.
-replace github.com/prometheus/prometheus => github.com/grafana/prometheus-private v0.0.0-20210720123808-b1ed4a0a663d
+replace github.com/prometheus/prometheus => github.com/grafana/prometheus-private v0.0.0-20210813131603-a3ad21265842
 // Pin hashicorp dependencies since the Prometheus fork, go mod tries to update them.
 replace github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0
 replace github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2
+
+// TODO review the change introduced by https://github.com/grpc/grpc-go/pull/4416 before upgrading to 1.39.0
+replace google.golang.org/grpc => google.golang.org/grpc v1.38.0
diff --git a/go.sum b/go.sum
index a91f05e614..a6e25de5d9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,4 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -22,8 +21,10 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c=
 cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -55,8 +56,8 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot
 github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v55.2.0+incompatible h1:TL2/vJWJEPOrmv97nHcbvjXES0Ntlb9P95hqGA1J2dU=
-github.com/Azure/azure-sdk-for-go v55.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v55.8.0+incompatible h1:EuccMPzxu67cIE95/mrtwQivLv7ETmURi5IUgLNVug8=
+github.com/Azure/azure-sdk-for-go v55.8.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
 github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -219,8 +220,8 @@ github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+
 github.com/aws/aws-sdk-go v1.35.31/go.mod
h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.8/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.60 h1:MgyEsX0IMwivwth1VwEnesBpH0vxbjp5a0w1lurMOXY= -github.com/aws/aws-sdk-go v1.38.60/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.40.10 h1:h+xUINuuH/9CwxE7O8mAuW7Aj9E5agfE9jQ3DrJsnA8= +github.com/aws/aws-sdk-go v1.40.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= @@ -284,10 +285,9 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190531201743-edce55837238/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -332,8 +332,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.3 h1:mfKOepNDIJ3EiBTEyHFpEqB6YSOSkGcjPDIu7cD+YzY= -github.com/containerd/containerd v1.5.3/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= +github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -421,6 +421,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do 
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -441,8 +442,11 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -453,8 +457,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/digitalocean/godo v1.62.0 h1:7Gw2KFsWkxl36qJa0s50tgXaE0Cgm51JdRP+MFQvNnM= -github.com/digitalocean/godo v1.62.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.64.2 h1:lJEB2TVIkJydFWJMPtdYOPa2Xwib+smZqq/oUZF8/iA= +github.com/digitalocean/godo v1.64.2/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -501,14 +505,12 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane 
v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA= +github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM= +github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -537,6 +539,7 @@ github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -544,6 +547,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -742,7 +746,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -750,6 +753,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -773,8 +777,10 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -782,6 +788,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -821,8 +828,9 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303 h1:UP+GUfRSErTzM7UtTMt4TjnfXKK3ybrGQasgSOp7vLY= +github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -841,8 +849,8 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= -github.com/gophercloud/gophercloud v0.18.0 h1:V6hcuMPmjXg+js9flU8T3RIHDCjV7F5CG5GD0MRhP/w= -github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.19.0 h1:zzaIh8W2K5M4AkJhPV1z6O4Sp0FOObzXm61NUmFz3Kw= +github.com/gophercloud/gophercloud v0.19.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -851,8 +859,9 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= @@ -861,8 +870,8 @@ github.com/grafana/dskit v0.0.0-20210813141227-f25e16ec09b2 h1:CvhyhUHPJfWw8KE55 github.com/grafana/dskit v0.0.0-20210813141227-f25e16ec09b2/go.mod h1:cfT+FhY8u0pX9cxG4xWcR7hYnsgCZN45qbZ7SIKHXqU= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= -github.com/grafana/prometheus-private v0.0.0-20210720123808-b1ed4a0a663d h1:QIkiKJQhNSX2ow0JhLbkS7u5QFMT/8hJlCEu76joirc= -github.com/grafana/prometheus-private v0.0.0-20210720123808-b1ed4a0a663d/go.mod h1:o6V+A4iPEWjLG0rSEKeev3OzfBZwP+ay+4iS4dkfLI4= 
+github.com/grafana/prometheus-private v0.0.0-20210813131603-a3ad21265842 h1:ooA3k7/sLYgexE3B6I5tIqmo1ey8Ko5HoL7+iNTO/XY= +github.com/grafana/prometheus-private v0.0.0-20210813131603-a3ad21265842/go.mod h1:ZHczEifRAgXT0ypud2xADA4wVWymlQeZiPAzEvTNDas= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -889,14 +898,16 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= -github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/api v1.9.1 h1:SngrdG2L62qqLsUz85qcPhFZ78rPf8tcD5qjMgs6MME= +github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -950,12 +961,13 @@ github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqB github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/hetznercloud/hcloud-go v1.26.2 h1:fI8BXAGJI4EFeCDd2a/I4EhqyK32cDdxGeWfYMGUi50= -github.com/hetznercloud/hcloud-go v1.26.2/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= +github.com/hetznercloud/hcloud-go v1.28.0 h1:T2a0CVGETf7BoWIdZ/TACqmTZAa/ROutcfdUHYiPAQ4= +github.com/hetznercloud/hcloud-go v1.28.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase 
v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -965,13 +977,13 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.113.0/go.mod h1:3TJtvbm/Kwuo5/PEo5P6HUzwVg4bXWkb2wPQHPtQdlU= +github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I= github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= -github.com/influxdata/influxdb v1.9.2/go.mod h1:UEe3MeD9AaP5rlPIes102IhYua3FhIWZuOXNHxDjSrI= +github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= -github.com/influxdata/pkg-config v0.2.6/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= @@ -1044,6 +1056,7 @@ github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFt github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -1060,6 +1073,8 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 
+github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= @@ -1070,11 +1085,11 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.28.5 h1:JaCziTxHJ7a01MYyjHqskRc8zXQxXOddwrDeoQ2rBnw= -github.com/linode/linodego v0.28.5/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= +github.com/linode/linodego v0.31.0 h1:99+t6GtTaIsL+ncz5BpCbkh8Y90rC7qW1Not8MFe0Gw= +github.com/linode/linodego v0.31.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1089,10 +1104,14 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe 
h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= @@ -1100,6 +1119,7 @@ github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6I github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -1132,8 +1152,8 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.42 h1:gWGe42RGaIqXQZ+r3WUGEKBEtvPHY2SXo4dqixDNxuY= -github.com/miekg/dns v1.1.42/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= @@ -1302,6 +1322,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -1370,12 +1391,13 @@ github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.24.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/exporter-toolkit v0.5.0/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= -github.com/prometheus/exporter-toolkit v0.6.0 h1:rGoS9gIqj3sXaw+frvo0ozCs1CxBRqpOCGsbixC52UI= 
-github.com/prometheus/exporter-toolkit v0.6.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0= +github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1466,8 +1488,10 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.3.4 h1:8q6vk3hthlpb2SouZcnBVKboxWQWMDNF38bwholZrJc= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1534,7 +1558,6 @@ github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixU github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -1550,6 +1573,9 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod 
h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= @@ -1659,13 +1685,13 @@ go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiW go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= @@ -1704,6 +1730,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1713,6 +1740,7 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -1739,7 +1767,6 @@ golang.org/x/exp v0.0.0-20200821190819-94841d0725da/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1766,7 +1793,6 @@ golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1833,8 +1859,9 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 h1:4CSI6oo7cOjJKajidEljs9h+uP0rRZBPPPhcCbj5mw8= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1850,8 +1877,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1868,7 +1896,6 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cO golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1950,6 +1977,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1969,7 +1997,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1980,13 +2007,16 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2004,19 +2034,16 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -2077,6 +2104,7 @@ golang.org/x/tools 
v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -2095,6 +2123,7 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2137,9 +2166,10 @@ google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEG google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0 h1:SQaA2Cx57B+iPw2MBgyjEkoeMkRK2IenSGoia0U3lCk= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2207,38 +2237,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2253,8 +2257,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -2305,7 +2310,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2321,8 +2325,8 @@ k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuv k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2344,8 +2348,8 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE= +k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= @@ -2364,8 +2368,8 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb sigs.k8s.io/structured-merge-diff 
v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 9931f7a8db..d146800a0e 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,80 @@ # Changes +## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13) + + +### Features + +* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) +* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567)) +* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) +* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c)) +* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06)) + + +### Bug Fixes + +* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167)) +* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391)) + + +### Miscellaneous Chores + +* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15)) + +## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01) + + +### Features + +* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5)) + +## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30) + + +### Features + +* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* 
**datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **documentai:** update document.proto, add the processor management methods. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) +* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) +* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00)) +* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5)) +* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6)) +* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437)) +* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273)) + + +### Bug Fixes + +* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260) +* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41)) +* **kms:** replace IAMPolicy mixin in service config. 
([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) +* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0)) + +## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09) + + +### Features + +* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **apigeeconnect:** start generating abiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b)) +* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2)) +* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) +* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b)) +* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345)) +* **lifesciences:** strat generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c)) +* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b)) + + +### Bug Fixes + +* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a)) +* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c)) + ## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index ba024f5abe..25bb890f17 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -12,7 +12,7 @@ To install the packages on your system, *do not clone the repo*. Instead: 1. Change to your project directory: - ``` + ```bash cd /my/cloud/project ``` 1. Get the package you want to use. Some products have their own module, so it's @@ -54,7 +54,7 @@ make backwards-incompatible changes. 
| [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) | | [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) | | [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) | -| [Pub/Sub Lite][cloud-pubsublite] | beta | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) | +| [Pub/Sub Lite][cloud-pubsublite] | stable | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) | | [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) | | [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) | | [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) | diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index 04ee0cb443..163f580c4f 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -4,20 +4,20 @@ go 1.11 require ( cloud.google.com/go/storage v1.10.0 - github.com/golang/mock v1.5.0 + github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.6 github.com/google/martian/v3 v3.2.1 - github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 + github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 github.com/googleapis/gax-go/v2 v2.0.5 github.com/jstemmer/go-junit-report v0.9.1 go.opencensus.io v0.23.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c + golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 golang.org/x/text v0.3.6 - golang.org/x/tools v0.1.2 - google.golang.org/api v0.47.0 - google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c - google.golang.org/grpc v1.38.0 - google.golang.org/protobuf v1.26.0 + golang.org/x/tools v0.1.4 + google.golang.org/api v0.50.0 + google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a + google.golang.org/grpc v1.39.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index 6ed2bf87a3..b90b6d6c33 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -18,6 +18,8 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -39,6 +41,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -47,6 +50,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -54,7 +58,9 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -70,8 +76,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -124,13 +131,15 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 h1:ub2sxhs2A0HRa2dWHavvmWxiVGXNfE9wI+gcTMwED8A= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 h1:2tft2559dNwKl2znYB58oVTql0grRB+Ml3LWIBbc4WM= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -144,6 +153,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -162,6 +172,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -251,8 +262,9 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -303,8 +315,10 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 h1:hZR0X1kPW+nwyJ9xRxqZk1vx5RUObAPBdKVvXPDUH/E= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -365,8 +379,10 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -393,8 +409,10 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0 h1:sQLWZQvP6jPGIP4JGPkJu4zHswrv81iobiyszr3b/0I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0 h1:LX7NFCFYOHzr7WHaYiRUpeipZe9o5L8T+2F4Z798VDw= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -426,6 +444,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -443,8 +462,12 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a h1:89EorDSnBRFywcvGsJvpxw2IsiDMI+DeM7iZOaunfHs= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -458,6 +481,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -465,8 +489,9 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -479,12 +504,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index fa85f8c258..9c4e5f1643 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -8,6 +8,15 @@ "release_level": "ga", "library_type": "" }, + "cloud.google.com/go/aiplatform/apiv1": { + "distribution_name": "cloud.google.com/go/aiplatform/apiv1", + "description": 
"Vertex AI API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/aiplatform/apiv1", + "release_level": "ga", + "library_type": "" + }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", "description": "Google Analytics Admin API", @@ -35,6 +44,15 @@ "release_level": "ga", "library_type": "" }, + "cloud.google.com/go/apigeeconnect/apiv1": { + "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", + "description": "Apigee Connect API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/apigeeconnect/apiv1", + "release_level": "ga", + "library_type": "" + }, "cloud.google.com/go/appengine/apiv1": { "distribution_name": "cloud.google.com/go/appengine/apiv1", "description": "App Engine Admin API", @@ -266,7 +284,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/clouddms/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/cloudtasks/apiv2": { @@ -296,6 +314,15 @@ "release_level": "beta", "library_type": "" }, + "cloud.google.com/go/compute/apiv1": { + "distribution_name": "cloud.google.com/go/compute/apiv1", + "description": "Google Compute Engine API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/compute/apiv1", + "release_level": "alpha", + "library_type": "" + }, "cloud.google.com/go/compute/metadata": { "distribution_name": "cloud.google.com/go/compute/metadata", "description": "Service Metadata API", @@ -341,6 +368,15 @@ "release_level": "beta", "library_type": "" }, + "cloud.google.com/go/dataflow/apiv1beta3": { + "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", + "description": "Dataflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dataflow/apiv1beta3", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/datalabeling/apiv1beta1": { "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", "description": "Data Labeling API", @@ -395,6 +431,15 @@ "release_level": "alpha", "library_type": "" }, + "cloud.google.com/go/datastream/apiv1alpha1": { + "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", + "description": "Datastream API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/datastream/apiv1alpha1", + "release_level": "alpha", + "library_type": "" + }, "cloud.google.com/go/debugger/apiv2": { "distribution_name": "cloud.google.com/go/debugger/apiv2", "description": "Stackdriver Debugger API", @@ -419,7 +464,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/dialogflow/cx/apiv3", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { @@ -446,7 +491,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/documentai/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, 
"cloud.google.com/go/documentai/apiv1beta3": { @@ -491,6 +536,15 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/essentialcontacts/apiv1", + "release_level": "ga", + "library_type": "" + }, + "cloud.google.com/go/eventarc/apiv1": { + "distribution_name": "cloud.google.com/go/eventarc/apiv1", + "description": "Eventarc API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/eventarc/apiv1", "release_level": "beta", "library_type": "" }, @@ -514,7 +568,7 @@ }, "cloud.google.com/go/firestore/apiv1/admin": { "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", - "description": "Google Cloud Firestore Admin API", + "description": "Cloud Firestore API", "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/firestore/apiv1/admin", @@ -557,6 +611,15 @@ "release_level": "beta", "library_type": "" }, + "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { + "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", + "description": "Connect Gateway API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gkeconnect/gateway/apiv1beta1", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/gkehub/apiv1beta1": { "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", "description": "GKE Hub", @@ -572,7 +635,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/gsuiteaddons/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/iam": { @@ -629,6 +692,15 @@ "release_level": "beta", "library_type": "" }, + "cloud.google.com/go/lifesciences/apiv2beta": { + "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", + "description": "Cloud Life Sciences API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/lifesciences/apiv2beta", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", "description": "Cloud Logging API", @@ -698,7 +770,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/metastore/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/metastore/apiv1alpha": { @@ -746,6 +818,15 @@ "release_level": "alpha", "library_type": "" }, + "cloud.google.com/go/networkmanagement/apiv1": { + "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", + "description": "Network Management API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/networkmanagement/apiv1", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", "description": "Notebooks API", @@ -896,7 +977,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/pubsublite/apiv1", - 
"release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/recaptchaenterprise/apiv1": { @@ -977,7 +1058,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/resourcesettings/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/retail/apiv2": { @@ -1040,7 +1121,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/security/privateca/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/security/privateca/apiv1beta1": { @@ -1130,7 +1211,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/serviceusage/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/shell/apiv1": { @@ -1139,7 +1220,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/shell/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/spanner": { @@ -1205,6 +1286,15 @@ "release_level": "ga", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/storage/internal/apiv1": { + "distribution_name": "cloud.google.com/go/storage/internal/apiv1", + "description": "Cloud Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/storage/internal/apiv1", + "release_level": "alpha", + "library_type": "" + }, "cloud.google.com/go/talent/apiv4": { "distribution_name": "cloud.google.com/go/talent/apiv4", "description": "Cloud Talent Solution API", @@ -1232,6 +1322,15 @@ "release_level": "ga", "library_type": "" }, + "cloud.google.com/go/tpu/apiv1": { + "distribution_name": "cloud.google.com/go/tpu/apiv1", + "description": "Cloud TPU API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/tpu/apiv1", + "release_level": "ga", + "library_type": "" + }, "cloud.google.com/go/trace/apiv1": { "distribution_name": "cloud.google.com/go/trace/apiv1", "description": "Stackdriver Trace API", @@ -1259,6 +1358,15 @@ "release_level": "ga", "library_type": "" }, + "cloud.google.com/go/video/transcoder/apiv1": { + "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", + "description": "Transcoder API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/transcoder/apiv1", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/video/transcoder/apiv1beta1": { "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", "description": "Transcoder API", @@ -1310,7 +1418,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/vpcaccess/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "" }, "cloud.google.com/go/webrisk/apiv1": { @@ -1349,6 +1457,15 @@ "release_level": "beta", "library_type": "" }, + "cloud.google.com/go/workflows/executions/apiv1": { + "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", + "description": "Workflow 
Executions API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/workflows/executions/apiv1", + "release_level": "beta", + "library_type": "" + }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", "description": "Workflow Executions API", diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index c512294d89..634668fcd5 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -48,7 +48,7 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20210602" +const versionClient = "20210702" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index 3fd6b76bf5..4d96cbc08c 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -23,7 +23,6 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" @@ -33,6 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) var newOperationsClientHook clientHook @@ -52,6 +52,7 @@ func defaultOperationsGRPCClientOptions() []option.ClientOption { internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"), internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 8f35b3464b..df63bade10 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -13,7 +13,6 @@ package ec2metadata import ( "bytes" - "errors" "io" "net/http" "net/url" @@ -234,7 +233,8 @@ func unmarshalError(r *request.Request) { // Response body format is not consistent between metadata endpoints. 
// Grab the error message as a string and include that as the source error - r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.Error = awserr.NewRequestFailure( + awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil), r.HTTPResponse.StatusCode, r.RequestID) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 654fb1ad52..b98ea86981 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -81,7 +81,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol // Customization for i := 0; i < len(ps); i++ { p := &ps[i] - custAddEC2Metadata(p) custAddS3DualStack(p) custRegionalS3(p) custRmIotDataService(p) @@ -140,19 +139,6 @@ func custAddDualstack(p *partition, svcName string) { p.Services[svcName] = s } -func custAddEC2Metadata(p *partition) { - p.Services["ec2metadata"] = service{ - IsRegionalized: boxedFalse, - PartitionEndpoint: "aws-global", - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - } -} - func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index a07649e906..d189be5f7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -648,9 +648,33 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "api.mediatailor": service{ @@ -767,6 +791,7 @@ var awsPartition = partition{ "appflow": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -862,6 +887,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "fips": endpoint{ Hostname: "appstream2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -878,6 +904,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -982,6 +1009,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": 
endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1353,6 +1381,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2142,6 +2171,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2369,17 +2399,6 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -2869,11 +2888,41 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ @@ -3047,6 +3096,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3555,6 +3605,8 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "honeycode": service{ @@ -3816,6 +3868,18 @@ var awsPartition = partition{ "iotwireless": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, "eu-west-1": endpoint{ Hostname: "api.iotwireless.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3828,6 +3892,12 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "us-west-2": endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "kafka": service{ @@ -4429,6 +4499,7 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, 
"us-west-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -4811,6 +4882,12 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "eu-west-3": endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "us-east-1": endpoint{ Hostname: "oidc.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -4891,6 +4968,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5137,6 +5215,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "qldb-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -6299,6 +6378,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "session.qldb-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -6734,6 +6814,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -7804,17 +7885,6 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -8098,6 +8168,12 @@ var awscnPartition = partition{ "neptune": service{ Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "rds.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, "cn-northwest-1": endpoint{ Hostname: "rds.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ @@ -8405,6 +8481,13 @@ var awscnPartition = partition{ }, }, }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "transfer": service{ Endpoints: endpoints{ @@ -9048,17 +9131,6 @@ var awsusgovPartition = partition{ }, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -9623,6 +9695,25 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "mq": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "neptune": service{ Endpoints: endpoints{ @@ -10512,17 +10603,6 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - 
Protocols: []string{"http"}, - }, - }, - }, "ecs": service{ Endpoints: endpoints{ @@ -10913,6 +10993,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "ds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "dynamodb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -10929,17 +11015,6 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, - }, - }, "ecs": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index ca956e5f12..8e8636f5f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -48,6 +48,9 @@ type Options struct { // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + EC2MetadataEndpointMode EC2IMDSEndpointModeState + // STS Regional Endpoint flag helps with resolving the STS endpoint STSRegionalEndpoint STSRegionalEndpoint @@ -55,6 +58,33 @@ type Options struct { S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint } +// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. +type EC2IMDSEndpointModeState uint + +// Enumeration values for EC2IMDSEndpointModeState +const ( + EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota + EC2IMDSEndpointModeStateIPv4 + EC2IMDSEndpointModeStateIPv6 +) + +// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset +func (e *EC2IMDSEndpointModeState) SetFromString(v string) error { + v = strings.TrimSpace(v) + + switch { + case len(v) == 0: + *e = EC2IMDSEndpointModeStateUnset + case strings.EqualFold(v, "IPv6"): + *e = EC2IMDSEndpointModeStateIPv6 + case strings.EqualFold(v, "IPv4"): + *e = EC2IMDSEndpointModeStateIPv4 + default: + return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") + } + return nil +} + // STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint // options. type STSRegionalEndpoint int @@ -247,7 +277,7 @@ func RegionsForService(ps []Partition, partitionID, serviceID string) (map[strin if p.ID() != partitionID { continue } - if _, ok := p.p.Services[serviceID]; !ok { + if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) { break } @@ -333,6 +363,7 @@ func (p Partition) Regions() map[string]Region { // enumerating over the services in a partition. func (p Partition) Services() map[string]Service { ss := make(map[string]Service, len(p.p.Services)) + for id := range p.p.Services { ss[id] = Service{ id: id, @@ -340,6 +371,15 @@ func (p Partition) Services() map[string]Service { } } + // Since we have removed the customization that injected this into the model + // we still need to pretend that this is a modeled service. 
+ if _, ok := ss[Ec2metadataServiceID]; !ok { + ss[Ec2metadataServiceID] = Service{ + id: Ec2metadataServiceID, + p: p.p, + } + } + return ss } @@ -400,7 +440,18 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve // an URL that can be resolved to a instance of a service. func (s Service) Regions() map[string]Region { rs := map[string]Region{} - for id := range s.p.Services[s.id].Endpoints { + + service, ok := s.p.Services[s.id] + + // Since ec2metadata customization has been removed we need to check + // if it was defined in non-standard endpoints.json file. If it's not + // then we can return the empty map as there is no regional-endpoints for IMDS. + // Otherwise, we iterate need to iterate the non-standard model. + if s.id == Ec2metadataServiceID && !ok { + return rs + } + + for id := range service.Endpoints { if r, ok := s.p.Regions[id]; ok { rs[id] = Region{ id: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index aaff682608..c6c6a03387 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,11 @@ import ( "strings" ) +const ( + ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest" + ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" +) + var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) type partitions []partition @@ -102,6 +107,12 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( opt.Set(opts...) s, hasService := p.Services[service] + + if service == Ec2metadataServiceID && !hasService { + endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) + return endpoint, nil + } + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. @@ -129,6 +140,31 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } +func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { + switch mode { + case EC2IMDSEndpointModeStateIPv6: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv6, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + case EC2IMDSEndpointModeStateIPv4: + fallthrough + default: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv4, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + } +} + func serviceList(ss services) []string { list := make([]string, 0, len(ss)) for k := range ss { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 9419b518d5..43b56863e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -283,7 +283,7 @@ component must be enclosed in square brackets. The custom EC2 IMDS endpoint can also be specified via the Session options. 
sess, err := session.NewSessionWithOptions(session.Options{ - EC2IMDSEndpoint: "http://[::1]", + EC2MetadataEndpoint: "http://[::1]", }) */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 3cd5d4b5ae..fffe2f350c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -161,10 +161,15 @@ type envConfig struct { // AWS_S3_USE_ARN_REGION=true S3UseARNRegion bool - // Specifies the alternative endpoint to use for EC2 IMDS. + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. // // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] EC2IMDSEndpoint string + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState } var ( @@ -231,6 +236,9 @@ var ( ec2IMDSEndpointEnvKey = []string{ "AWS_EC2_METADATA_SERVICE_ENDPOINT", } + ec2IMDSEndpointModeEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", + } useCABundleKey = []string{ "AWS_CA_BUNDLE", } @@ -364,6 +372,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { } setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { + return envConfig{}, err + } return cfg, nil } @@ -376,3 +387,17 @@ func setFromEnvVal(dst *string, keys []string) { } } } + +func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + return nil + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 038ae222ff..4b2e057e93 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -283,8 +283,8 @@ type Options struct { Handlers request.Handlers // Allows specifying a custom endpoint to be used by the EC2 IMDS client - // when making requests to the EC2 IMDS API. The must endpoint value must - // include protocol prefix. + // when making requests to the EC2 IMDS API. The endpoint value should + // include the URI scheme. If the scheme is not present it will be defaulted to http. // // If unset, will the EC2 IMDS client will use its default endpoint. // @@ -298,6 +298,11 @@ type Options struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] EC2IMDSEndpoint string + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -375,19 +380,23 @@ func Must(sess *Session, err error) *Session { // Wraps the endpoint resolver with a resolver that will return a custom // endpoint for EC2 IMDS. 
-func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver { +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { return endpoints.ResolverFunc( func(service, region string, opts ...func(*endpoints.Options)) ( endpoints.ResolvedEndpoint, error, ) { - if service == ec2MetadataServiceID { + if service == ec2MetadataServiceID && len(endpoint) > 0 { return endpoints.ResolvedEndpoint{ URL: endpoint, SigningName: ec2MetadataServiceID, SigningRegion: region, }, nil + } else if service == ec2MetadataServiceID { + opts = append(opts, func(o *endpoints.Options) { + o.EC2MetadataEndpointMode = mode + }) } - return resolver.EndpointFor(service, region) + return resolver.EndpointFor(service, region, opts...) }) } @@ -404,8 +413,8 @@ func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { cfg.EndpointResolver = endpoints.DefaultResolver() } - if len(envCfg.EC2IMDSEndpoint) != 0 { - cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint) + if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode) } cfg.Credentials = defaults.CredChain(cfg, handlers) @@ -737,12 +746,32 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, endpoints.LegacyS3UsEast1Endpoint, }) - ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint - if len(ec2IMDSEndpoint) == 0 { - ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint + var ec2IMDSEndpoint string + for _, v := range []string{ + sessOpts.EC2IMDSEndpoint, + envCfg.EC2IMDSEndpoint, + sharedCfg.EC2IMDSEndpoint, + } { + if len(v) != 0 { + ec2IMDSEndpoint = v + break + } } - if len(ec2IMDSEndpoint) != 0 { - cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint) + + var endpointMode endpoints.EC2IMDSEndpointModeState + for _, v := range []endpoints.EC2IMDSEndpointModeState{ + sessOpts.EC2IMDSEndpointMode, + envCfg.EC2IMDSEndpointMode, + sharedCfg.EC2IMDSEndpointMode, + } { + if v != endpoints.EC2IMDSEndpointModeStateUnset { + endpointMode = v + break + } + } + + if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } // Configure credentials if not already set by the user when creating the diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 42b16a7db9..6830ece70f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -66,6 +66,12 @@ const ( // S3 ARN Region Usage s3UseARNRegionKey = "s3_use_arn_region" + + // EC2 IMDS Endpoint Mode + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + // EC2 IMDS Endpoint + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" ) // sharedConfig represents the configuration fields of the SDK config files. 
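The session and shared-config hunks around this point add two knobs for reaching the EC2 Instance Metadata Service: an explicit endpoint (the EC2IMDSEndpoint option, AWS_EC2_METADATA_SERVICE_ENDPOINT, ec2_metadata_service_endpoint) and an endpoint mode that selects the built-in IPv4 or IPv6 address (EC2IMDSEndpointMode, AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE, ec2_metadata_service_endpoint_mode), with session options taking precedence over the environment, which in turn takes precedence over the shared config file. A minimal sketch of how a caller might opt into the IPv6 endpoint with this vendored SDK version; the region argument and the printed URL are illustrative, not taken from this diff:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Equivalent to exporting AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
        // or setting ec2_metadata_service_endpoint_mode=IPv6 in the shared
        // config file; the session option wins when several sources are set.
        sess, err := session.NewSessionWithOptions(session.Options{
            EC2IMDSEndpointMode: endpoints.EC2IMDSEndpointModeStateIPv6,
        })
        if err != nil {
            panic(err)
        }

        // With the mode set, the wrapped resolver answers the ec2metadata
        // pseudo-service with the IPv6 metadata address instead of
        // 169.254.169.254.
        resolved, err := sess.Config.EndpointResolver.EndpointFor(
            endpoints.Ec2metadataServiceID, "us-east-1",
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(resolved.URL) // http://[fd00:ec2::254]/latest
    }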
@@ -145,6 +151,16 @@ type sharedConfig struct { // // s3_use_arn_region=true S3UseARNRegion bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // ec2_metadata_service_endpoint_mode=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. + // + // ec2_metadata_service_endpoint=http://fd00:ec2::254 + EC2IMDSEndpoint string } type sharedConfigFile struct { @@ -334,6 +350,12 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.SSORegion, section, ssoRegionKey) updateString(&cfg.SSORoleName, section, ssoRoleNameKey) updateString(&cfg.SSOStartURL, section, ssoStartURL) + + if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + ec2MetadataServiceEndpointModeKey, file.Filename, err) + } + updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -364,6 +386,14 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e return nil } +func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { if err := cfg.validateCredentialsRequireARN(profile); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go index 07ea799fbd..9937538317 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -34,23 +34,23 @@ func (m mapRule) IsValid(value string) bool { return ok } -// whitelist is a generic rule for whitelisting -type whitelist struct { +// allowList is a generic rule for allow listing +type allowList struct { rule } -// IsValid for whitelist checks if the value is within the whitelist -func (w whitelist) IsValid(value string) bool { +// IsValid for allow list checks if the value is within the allow list +func (w allowList) IsValid(value string) bool { return w.rule.IsValid(value) } -// blacklist is a generic rule for blacklisting -type blacklist struct { +// excludeList is a generic rule for exclude listing +type excludeList struct { rule } -// IsValid for whitelist checks if the value is within the whitelist -func (b blacklist) IsValid(value string) bool { +// IsValid for exclude list checks if the value is within the exclude list +func (b excludeList) IsValid(value string) bool { return !b.rule.IsValid(value) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 1737c2686d..d4653031f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -90,7 +90,7 @@ const ( ) var ignoredHeaders = rules{ - blacklist{ + excludeList{ mapRule{ authorizationHeader: struct{}{}, "User-Agent": struct{}{}, @@ -99,9 +99,9 @@ var ignoredHeaders = rules{ }, } -// requiredSignedHeaders is a whitelist for build canonical headers. 
+// requiredSignedHeaders is a allow list for build canonical headers. var requiredSignedHeaders = rules{ - whitelist{ + allowList{ mapRule{ "Cache-Control": struct{}{}, "Content-Disposition": struct{}{}, @@ -145,12 +145,13 @@ var requiredSignedHeaders = rules{ }, }, patterns{"X-Amz-Meta-"}, + patterns{"X-Amz-Object-Lock-"}, } -// allowedHoisting is a whitelist for build query headers. The boolean value +// allowedHoisting is a allow list for build query headers. The boolean value // represents whether or not it is a pattern. var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, + excludeList{requiredSignedHeaders}, patterns{"X-Amz-"}, } @@ -417,7 +418,7 @@ var SignRequestHandler = request.NamedHandler{ // request handler should only be used with the SDK's built in service client's // API operation requests. // -// This function should not be used on its on its own, but in conjunction with +// This function should not be used on its own, but in conjunction with // an AWS service client's API operation call. To sign a standalone request // not created by a service client's API operation method use the "Sign" or // "Presign" functions of the "Signer" type. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 938fdfd135..e5b999a5d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.38.60" +const SDKVersion = "1.40.10" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go index 25ce0fe134..1e55bbd07b 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -13,17 +13,30 @@ // } // // Below is the BNF that describes this parser -// Grammar: -// stmt -> value stmt' -// stmt' -> epsilon | op stmt -// value -> number | string | boolean | quoted_string +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " // -// section -> [ section' -// section' -> value section_close -// section_close -> ] +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] // -// SkipState will skip (NL WS)+ +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? 
// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index 55fa73ebcf..0ba319491c 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -5,9 +5,12 @@ import ( "io" ) +// ParseState represents the current state of the parser. +type ParseState uint + // State enums for the parse table const ( - InvalidState = iota + InvalidState ParseState = iota // stmt -> value stmt' StatementState // stmt' -> MarkComplete | op stmt @@ -36,8 +39,8 @@ const ( ) // parseTable is a state machine to dictate the grammar above. -var parseTable = map[ASTKind]map[TokenType]int{ - ASTKindStart: map[TokenType]int{ +var parseTable = map[ASTKind]map[TokenType]ParseState{ + ASTKindStart: { TokenLit: StatementState, TokenSep: OpenScopeState, TokenWS: SkipTokenState, @@ -45,7 +48,7 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenComment: CommentState, TokenNone: TerminalState, }, - ASTKindCommentStatement: map[TokenType]int{ + ASTKindCommentStatement: { TokenLit: StatementState, TokenSep: OpenScopeState, TokenWS: SkipTokenState, @@ -53,7 +56,7 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenComment: CommentState, TokenNone: MarkCompleteState, }, - ASTKindExpr: map[TokenType]int{ + ASTKindExpr: { TokenOp: StatementPrimeState, TokenLit: ValueState, TokenSep: OpenScopeState, @@ -62,13 +65,15 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenComment: CommentState, TokenNone: MarkCompleteState, }, - ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, - TokenNone: SkipState, + ASTKindEqualExpr: { + TokenLit: ValueState, + TokenSep: ValueState, + TokenOp: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, }, - ASTKindStatement: map[TokenType]int{ + ASTKindStatement: { TokenLit: SectionState, TokenSep: CloseScopeState, TokenWS: SkipTokenState, @@ -76,9 +81,9 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenComment: CommentState, TokenNone: MarkCompleteState, }, - ASTKindExprStatement: map[TokenType]int{ + ASTKindExprStatement: { TokenLit: ValueState, - TokenSep: OpenScopeState, + TokenSep: ValueState, TokenOp: ValueState, TokenWS: ValueState, TokenNL: MarkCompleteState, @@ -86,14 +91,14 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenNone: TerminalState, TokenComma: SkipState, }, - ASTKindSectionStatement: map[TokenType]int{ + ASTKindSectionStatement: { TokenLit: SectionState, TokenOp: SectionState, TokenSep: CloseScopeState, TokenWS: SectionState, TokenNL: SkipTokenState, }, - ASTKindCompletedSectionStatement: map[TokenType]int{ + ASTKindCompletedSectionStatement: { TokenWS: SkipTokenState, TokenNL: SkipTokenState, TokenLit: StatementState, @@ -101,7 +106,7 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenComment: CommentState, TokenNone: MarkCompleteState, }, - ASTKindSkipStatement: map[TokenType]int{ + ASTKindSkipStatement: { TokenLit: StatementState, TokenSep: OpenScopeState, TokenWS: SkipTokenState, @@ -205,18 +210,6 @@ loop: case ValueState: // ValueState requires the previous state to either be an equal expression // or an expression statement. - // - // This grammar occurs when the RHS is a number, word, or quoted string. 
- // equal_expr -> lit op equal_expr' - // equal_expr' -> number | string | quoted_string - // quoted_string -> " quoted_string' - // quoted_string' -> string quoted_string_end - // quoted_string_end -> " - // - // otherwise - // expr_stmt -> equal_expr (expr_stmt')* - // expr_stmt' -> ws S | op S | MarkComplete - // S -> equal_expr' expr_stmt' switch k.Kind { case ASTKindEqualExpr: // assigning a value to some key @@ -243,7 +236,7 @@ loop: } children[len(children)-1] = rhs - k.SetChildren(children) + root.SetChildren(children) stack.Push(k) } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go index 94841c3244..081cf43342 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -50,7 +50,10 @@ func (v *DefaultVisitor) VisitExpr(expr AST) error { rhs := children[1] - if rhs.Root.Type() != TokenLit { + // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values. + // If the token is not either a literal or one of the token types that identifies those four additional + // tokens then error. + if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) { return NewParseError("unexpected token type") } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1301b149d3..fb35fee5fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -98,7 +98,7 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo // Support the ability to customize values to be marshaled as a // blob even though they were modeled as a string. Required for S3 - // API operations like SSECustomerKey is modeled as stirng but + // API operations like SSECustomerKey is modeled as string but // required to be base64 encoded in request. if field.Tag.Get("marshal-as") == "blob" { m = m.Convert(byteSliceType) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index 98f4caed91..d486a4c2a0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,6 +1,8 @@ package protocol import ( + "bytes" + "fmt" "math" "strconv" "time" @@ -19,7 +21,9 @@ const ( // Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT - RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" // This format is used for output time without seconds precision RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" @@ -67,10 +71,20 @@ func FormatTime(name string, t time.Time) string { // the time if it was able to be parsed, and fails otherwise. 
func ParseTime(formatName, value string) (time.Time, error) { switch formatName { - case RFC822TimeFormatName: - return time.Parse(RFC822TimeFormat, value) - case ISO8601TimeFormatName: - return time.Parse(ISO8601TimeFormat, value) + case RFC822TimeFormatName: // Smithy HTTPDate format + return tryParse(value, + RFC822TimeFormat, + rfc822TimeFormatSingleDigitDay, + rfc822TimeFormatSingleDigitDayTwoDigitYear, + time.RFC850, + time.ANSIC, + ) + case ISO8601TimeFormatName: // Smithy DateTime format + return tryParse(value, + ISO8601TimeFormat, + time.RFC3339Nano, + time.RFC3339, + ) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) _, dec := math.Modf(v) @@ -83,3 +97,36 @@ func ParseTime(formatName, value string) (time.Time, error) { panic("unknown timestamp format name, " + formatName) } } + +func tryParse(v string, formats ...string) (time.Time, error) { + var errs parseErrors + for _, f := range formats { + t, err := time.Parse(f, v) + if err != nil { + errs = append(errs, parseError{ + Format: f, + Err: err, + }) + continue + } + return t, nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs) +} + +type parseErrors []parseError + +func (es parseErrors) Error() string { + var s bytes.Buffer + for _, e := range es { + fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) + } + + return "parse errors:" + s.String() +} + +type parseError struct { + Format string + Err error +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 17c4637889..3cffd533d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -57,19 +57,20 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // // Returns a set of temporary security credentials that you can use to access -// AWS resources that you might not normally have access to. These temporary -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use AssumeRole within your account or for cross-account -// access. For a comparison of AssumeRole with other API operations that produce -// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon Web Services resources that you might not normally have access to. +// These temporary credentials consist of an access key ID, a secret access +// key, and a security token. Typically, you use AssumeRole within your account +// or for cross-account access. For a comparison of AssumeRole with other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // Permissions // // The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: You cannot call -// the AWS STS GetFederationToken or GetSessionToken API operations. 
+// API calls to any Amazon Web Services service with the following exception: +// You cannot call the STS GetFederationToken or GetSessionToken API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -79,15 +80,15 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role from a different account, your AWS account must be trusted -// by the role. The trust relationship is defined in the role's trust policy -// when the role is created. That trust policy states which accounts are allowed +// To assume a role from a different account, your account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when +// the role is created. That trust policy states which accounts are allowed // to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions @@ -129,12 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // (Optional) You can include multi-factor authentication (MFA) information // when you call AssumeRole. This is useful for cross-account scenarios to ensure -// that the user that assumes the role has been authenticated with an AWS MFA -// device. In that scenario, the trust policy of the role being assumed includes -// a condition that tests for MFA authentication. If the caller does not include -// valid MFA information, the request to assume the role is denied. The condition -// in a trust policy that tests for MFA authentication might look like the following -// example. +// that the user that assumes the role has been authenticated with an Amazon +// Web Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. 
// // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // @@ -160,11 +161,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // You could receive this error even though you meet other defined session policy @@ -176,7 +177,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // * ErrCodeExpiredTokenException "ExpiredTokenException" @@ -252,16 +254,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // Returns a set of temporary security credentials for users who have been authenticated // via a SAML authentication response. This operation provides a mechanism for -// tying an enterprise identity store or directory to role-based AWS access -// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// tying an enterprise identity store or directory to role-based Amazon Web +// Services access without user-specific credentials or configuration. For a +// comparison of AssumeRoleWithSAML with the other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. 
// // The temporary security credentials returned by this operation consist of // an access key ID, a secret access key, and a security token. Applications -// can use these temporary security credentials to sign calls to AWS services. +// can use these temporary security credentials to sign calls to Amazon Web +// Services services. // // Session Duration // @@ -281,19 +284,19 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // in the IAM User Guide. // // Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your AWS CLI or AWS API role session to a maximum of one hour. When -// you use the AssumeRole API operation to assume a role, you can specify the -// duration of your role session with the DurationSeconds parameter. You can -// specify a parameter value of up to 43200 seconds (12 hours), depending on -// the maximum session duration setting for your role. However, if you assume +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume // a role using role chaining and provide a DurationSeconds parameter value // greater than one hour, the operation fails. // // Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used -// to make API calls to any AWS service with the following exception: you cannot -// call the STS GetFederationToken or GetSessionToken API operations. +// to make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -303,18 +306,19 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. -// The identity of the caller is validated by using keys in the metadata document -// that is uploaded for the SAML provider entity for your identity provider. 
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys +// in the metadata document that is uploaded for the SAML provider entity for +// your identity provider. // -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail -// logs. The entry includes the value in the NameID element of the SAML assertion. +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. +// The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally // identifiable information (PII). For example, you could instead use the persistent // identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). @@ -332,11 +336,11 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags -// for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. +// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -356,10 +360,11 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // SAML Configuration // // Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. +// Additionally, you must use Identity and Access Management (IAM) to create +// a SAML provider entity in your Amazon Web Services account that represents +// your identity provider. You must also create an IAM role that specifies this +// SAML provider in its trust policy. // // For more information, see the following resources: // @@ -389,11 +394,11 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. 
The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // You could receive this error even though you meet other defined session policy @@ -409,8 +414,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // can also mean that the claim has expired or has been explicitly revoked. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. // // * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a @@ -420,7 +426,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML @@ -496,30 +503,33 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // Connect-compatible identity provider. // // For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. +// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide +// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android +// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify +// a user. You can also supply the user with a consistent identity throughout +// the lifetime of an application. // // To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the AWS SDK for iOS Developer Guide. 
-// -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security -// credentials. Therefore, you can distribute an application (for example, on -// mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application. You also don't need to deploy -// server-based proxy services that use long-term AWS credentials. Instead, -// the identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito +// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the Amazon Web Services SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. Therefore, you can distribute an application +// (for example, on mobile devices) that requests temporary security credentials +// without including long-term Amazon Web Services credentials in the application. +// You also don't need to deploy server-based proxy services that use long-term +// Amazon Web Services credentials. Instead, the identity of the caller is validated +// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access // key ID, a secret access key, and a security token. Applications can use these -// temporary security credentials to sign calls to AWS service API operations. +// temporary security credentials to sign calls to Amazon Web Services service +// API operations. // // Session Duration // @@ -539,8 +549,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // Permissions // // The temporary security credentials created by AssumeRoleWithWebIdentity can -// be used to make API calls to any AWS service with the following exception: -// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// be used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken +// API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -550,10 +561,10 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. 
You can use the role's temporary credentials in subsequent -// AWS API calls to access resources in the account that owns the role. You -// cannot use session policies to grant more permissions than those allowed -// by the identity-based policy of the role that is being assumed. For more -// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Tags @@ -569,11 +580,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An AWS conversion compresses the passed session policies and session tags -// into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags -// for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. +// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -598,7 +609,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the identity provider that is associated with the identity token. In other // words, the identity provider must be specified in the role's trust policy. // -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail // logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) // of the provided web identity token. We recommend that you avoid using any // personally identifiable information (PII) in this field. For example, you @@ -614,10 +625,10 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). // Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to AWS. +// using those credentials to make a request to Amazon Web Services. // -// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and -// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). 
+// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). // These toolkits contain sample apps that show how to invoke the identity // providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. @@ -641,11 +652,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // You could receive this error even though you meet other defined session policy @@ -668,8 +679,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // error persists, the identity provider might be down or not responding. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by AWS. Get -// a new identity token from the identity provider and then retry the request. +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. // // * ErrCodeExpiredTokenException "ExpiredTokenException" // The web identity token that was passed is expired or is not valid. Get a @@ -679,7 +691,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity @@ -749,16 +762,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // DecodeAuthorizationMessage API operation for AWS Security Token Service. // // Decodes additional information about the authorization status of a request -// from an encoded message returned in response to an AWS request. 
+// from an encoded message returned in response to an Amazon Web Services request. // // For example, if a user is not authorized to perform an operation that he // or she has requested, the request returns a Client.UnauthorizedOperation -// response (an HTTP 403 response). Some AWS operations additionally return -// an encoded message that can provide details about this authorization failure. +// response (an HTTP 403 response). Some Amazon Web Services operations additionally +// return an encoded message that can provide details about this authorization +// failure. // -// Only certain AWS operations return an encoded authorization message. The -// documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether +// that operation returns an encoded message in addition to returning an HTTP +// code. // // The message is encoded because the details of the authorization status can // constitute privileged information that the user who requested the operation @@ -869,12 +884,12 @@ func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *reques // in the IAM User Guide. // // When you pass an access key ID to this operation, it returns the ID of the -// AWS account to which the keys belong. Access key IDs beginning with AKIA -// are long-term credentials for an IAM user or the AWS account root user. Access -// key IDs beginning with ASIA are temporary credentials that are created using -// STS operations. If the account in the response belongs to you, you can sign -// in as the root user and review your root user access keys. Then, you can -// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs +// to you, you can sign in as the root user and review your root user access +// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) // to learn which IAM user owns the keys. To learn who requested the temporary // credentials for an ASIA access key, view the STS events in your CloudTrail // logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) @@ -1050,7 +1065,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. 
// // You can create a mobile-based or browser-based app that can authenticate @@ -1062,11 +1077,11 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // in the IAM User Guide. // // You can also call GetFederationToken using the security credentials of an -// AWS account root user, but we do not recommend it. Instead, we recommend -// that you create an IAM user for the purpose of the proxy application. Then -// attach a policy to the IAM user that limits federated users to only the actions -// and resources that they need to access. For more information, see IAM Best -// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// Amazon Web Services account root user, but we do not recommend it. Instead, +// we recommend that you create an IAM user for the purpose of the proxy application. +// Then attach a policy to the IAM user that limits federated users to only +// the actions and resources that they need to access. For more information, +// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // // Session duration @@ -1074,15 +1089,16 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default // session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using AWS account root user credentials have a maximum duration -// of 3,600 seconds (1 hour). +// are obtained by using Amazon Web Services account root user credentials have +// a maximum duration of 3,600 seconds (1 hour). // // Permissions // // You can use the temporary credentials created by GetFederationToken in any -// AWS service except the following: +// Amazon Web Services service except the following: // -// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// * You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. // // * You cannot call any STS operations except GetCallerIdentity. // @@ -1126,11 +1142,11 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // in the IAM User Guide. // // You can also call GetFederationToken using the security credentials of an -// AWS account root user, but we do not recommend it. Instead, we recommend -// that you create an IAM user for the purpose of the proxy application. Then -// attach a policy to the IAM user that limits federated users to only the actions -// and resources that they need to access. For more information, see IAM Best -// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// Amazon Web Services account root user, but we do not recommend it. Instead, +// we recommend that you create an IAM user for the purpose of the proxy application. +// Then attach a policy to the IAM user that limits federated users to only +// the actions and resources that they need to access. For more information, +// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // // Session duration @@ -1138,15 +1154,16 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). 
The default // session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using AWS account root user credentials have a maximum duration -// of 3,600 seconds (1 hour). +// are obtained by using Amazon Web Services account root user credentials have +// a maximum duration of 3,600 seconds (1 hour). // // Permissions // // You can use the temporary credentials created by GetFederationToken in any -// AWS service except the following: +// Amazon Web Services service except the following: // -// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// * You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. // // * You cannot call any STS operations except GetCallerIdentity. // @@ -1208,11 +1225,11 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" // The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An AWS conversion compresses the -// session policy document, session policy ARNs, and session tags into a packed -// binary format that has a separate limit. The error message indicates by percentage -// how close the policies and tags are to the upper size limit. For more information, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // You could receive this error even though you meet other defined session policy @@ -1224,7 +1241,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken @@ -1293,51 +1311,53 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // GetSessionToken API operation for AWS Security Token Service. // -// Returns a set of temporary credentials for an AWS account or IAM user. The -// credentials consist of an access key ID, a secret access key, and a security -// token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. -// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA -// code that is associated with their MFA device. 
Using the temporary security -// credentials that are returned from the call, IAM users can then make programmatic -// calls to API operations that require MFA authentication. If you do not supply -// a correct MFA code, then the API returns an access denied error. For a comparison -// of GetSessionToken with the other API operations that produce temporary credentials, -// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Returns a set of temporary credentials for an Amazon Web Services account +// or IAM user. The credentials consist of an access key ID, a secret access +// key, and a security token. Typically, you use GetSessionToken if you want +// to use MFA to protect programmatic calls to specific Amazon Web Services +// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would +// need to call GetSessionToken and submit an MFA code that is associated with +// their MFA device. Using the temporary security credentials that are returned +// from the call, IAM users can then make programmatic calls to API operations +// that require MFA authentication. If you do not supply a correct MFA code, +// then the API returns an access denied error. For a comparison of GetSessionToken +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // Session Duration // -// The GetSessionToken operation must be called by using the long-term AWS security -// credentials of the AWS account root user or an IAM user. Credentials that -// are created by IAM users are valid for the duration that you specify. This -// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials -// based on account credentials can range from 900 seconds (15 minutes) up to -// 3,600 seconds (1 hour), with a default of 1 hour. +// The GetSessionToken operation must be called by using the long-term Amazon +// Web Services security credentials of the Amazon Web Services account root +// user or an IAM user. Credentials that are created by IAM users are valid +// for the duration that you specify. This duration can range from 900 seconds +// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default +// of 43,200 seconds (12 hours). Credentials based on account credentials can +// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a +// default of 1 hour. // // Permissions // // The temporary security credentials created by GetSessionToken can be used -// to make API calls to any AWS service with the following exceptions: +// to make API calls to any Amazon Web Services service with the following exceptions: // // * You cannot call any IAM API operations unless MFA authentication information // is included in the request. // // * You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with AWS account root user -// credentials. 
Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// We recommend that you do not call GetSessionToken with Amazon Web Services +// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with AWS. +// and using IAM users for everyday interaction with Amazon Web Services. // // The credentials that are returned by GetSessionToken are based on permissions // associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using AWS account root user credentials, the -// temporary credentials have root user permissions. Similarly, if GetSessionToken -// is called using the credentials of an IAM user, the temporary credentials -// have the same permissions as the IAM user. +// If GetSessionToken is called using Amazon Web Services account root user +// credentials, the temporary credentials have root user permissions. Similarly, +// if GetSessionToken is called using the credentials of an IAM user, the temporary +// credentials have the same permissions as the IAM user. // // For more information about using GetSessionToken to create temporary credentials, // go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) @@ -1355,7 +1375,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken @@ -1401,7 +1422,7 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1413,8 +1434,8 @@ type AssumeRoleInput struct { // of the trusting account might send an external ID to the administrator of // the trusted account. That way, only someone with the ID can assume the role, // rather than everyone in the account. 
For more information about the external - // ID, see How to Use an External ID When Granting Access to Your AWS Resources - // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // ID, see How to Use an External ID When Granting Access to Your Amazon Web + // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. // // The regex used to validate this parameter is a string of characters consisting @@ -1427,10 +1448,11 @@ type AssumeRoleInput struct { // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // The plaintext that you use for both inline and managed session policies can't @@ -1439,11 +1461,11 @@ type AssumeRoleInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1453,22 +1475,22 @@ type AssumeRoleInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plaintext that you use for both inline and managed session policies // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. 
// - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` @@ -1485,7 +1507,7 @@ type AssumeRoleInput struct { // account that owns the role. The role session name is also used in the ARN // of the assumed role principal. This means that subsequent cross-account API // requests that use the temporary security credentials will expose the role - // session name to the external account in their AWS CloudTrail logs. + // session name to the external account in their CloudTrail logs. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can @@ -1510,23 +1532,23 @@ type AssumeRoleInput struct { // // You can require users to specify a source identity when they assume a role. // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in AWS CloudTrail logs to - // determine who took actions with a role. You can use the aws:SourceIdentity - // condition key to further control access to AWS resources based on the value - // of source identity. For more information about using source identity, see - // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. 
For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can // also include underscores or any of the following characters: =,.@-. You cannot - // use a value that begins with the text aws:. This prefix is reserved for AWS - // internal use. + // use a value that begins with the text aws:. This prefix is reserved for Amazon + // Web Services internal use. SourceIdentity *string `min:"2" type:"string"` // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. For more information about session tags, - // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // see Tagging STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // This parameter is optional. You can pass up to 50 session tags. The plaintext @@ -1535,11 +1557,11 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1554,7 +1576,7 @@ type AssumeRoleInput struct { // Additionally, if you used temporary credentials to perform this operation, // the new session inherits any transitive session tags from the calling session. // If you pass a session tag with the same key as an inherited tag, the operation - // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // fails. To view the inherited tags for a session, see the CloudTrail logs. // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []*Tag `type:"list"` @@ -1720,7 +1742,8 @@ func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { } // Contains the response to a successful AssumeRole request, including temporary -// AWS credentials that can be used to make AWS requests. +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. 
type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1749,11 +1772,11 @@ type AssumeRoleOutput struct { // // You can require users to specify a source identity when they assume a role. // You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in AWS CloudTrail logs to - // determine who took actions with a role. You can use the aws:SourceIdentity - // condition key to further control access to AWS resources based on the value - // of source identity. For more information about using source identity, see - // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) // in the IAM User Guide. // // The regex used to validate this parameter is a string of characters consisting @@ -1819,7 +1842,7 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1828,10 +1851,11 @@ type AssumeRoleWithSAMLInput struct { // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // The plaintext that you use for both inline and managed session policies can't @@ -1840,11 +1864,11 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. 
Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1854,22 +1878,22 @@ type AssumeRoleWithSAMLInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plaintext that you use for both inline and managed session policies // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. 
PolicyArns []*PolicyDescriptorType `type:"list"` @@ -1984,7 +2008,8 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML } // Contains the response to a successful AssumeRoleWithSAML request, including -// temporary AWS credentials that can be used to make AWS requests. +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -2010,7 +2035,7 @@ type AssumeRoleWithSAMLOutput struct { // // * The Issuer response value. // - // * The AWS account ID. + // * The Amazon Web Services account ID. // // * The friendly name (the last part of the ARN) of the SAML provider in // IAM. @@ -2148,7 +2173,7 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -2157,10 +2182,11 @@ type AssumeRoleWithWebIdentityInput struct { // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use - // the role's temporary credentials in subsequent AWS API calls to access resources - // in the account that owns the role. You cannot use session policies to grant - // more permissions than those allowed by the identity-based policy of the role - // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // The plaintext that you use for both inline and managed session policies can't @@ -2169,11 +2195,11 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. 
+ // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2183,22 +2209,22 @@ type AssumeRoleWithWebIdentityInput struct { // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plaintext that you use for both inline and managed session policies // can't exceed 2,048 characters. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based // policy and the session policies. You can use the role's temporary credentials - // in subsequent AWS API calls to access resources in the account that owns - // the role. You cannot use session policies to grant more permissions than - // those allowed by the identity-based policy of the role that is being assumed. - // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` @@ -2338,7 +2364,8 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo } // Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary AWS credentials that can be used to make AWS requests. +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -2471,8 +2498,8 @@ type AssumedRoleUser struct { Arn *string `min:"20" type:"string" required:"true"` // A unique identifier that contains the role ID and the role session name of - // the role that is being assumed. The role ID is generated by AWS when the - // role is created. 
+ // the role that is being assumed. The role ID is generated by Amazon Web Services + // when the role is created. // // AssumedRoleId is a required field AssumedRoleId *string `min:"2" type:"string" required:"true"` @@ -2500,7 +2527,7 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { return s } -// AWS credentials for API authentication. +// Amazon Web Services credentials for API authentication. type Credentials struct { _ struct{} `type:"structure"` @@ -2601,8 +2628,8 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut } // A document that contains additional information about the authorization status -// of a request from an encoded message that is returned in response to an AWS -// request. +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -2714,7 +2741,7 @@ func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput type GetAccessKeyInfoOutput struct { _ struct{} `type:"structure"` - // The number used to identify the AWS account. + // The number used to identify the Amazon Web Services account. Account *string `type:"string"` } @@ -2753,11 +2780,11 @@ func (s GetCallerIdentityInput) GoString() string { type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` - // The AWS account ID number of the account that owns or contains the calling - // entity. + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. Account *string `type:"string"` - // The AWS ARN associated with the calling entity. + // The Amazon Web Services ARN associated with the calling entity. Arn *string `min:"20" type:"string"` // The unique identifier of the calling entity. The exact value depends on the @@ -2801,9 +2828,10 @@ type GetFederationTokenInput struct { // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using AWS account root user credentials are restricted to a maximum of 3,600 - // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using root user credentials defaults to one hour. + // using Amazon Web Services account root user credentials are restricted to + // a maximum of 3,600 seconds (one hour). If the specified duration is longer + // than one hour, the session obtained by using root user credentials defaults + // to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -2848,11 +2876,11 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. 
Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2865,8 +2893,8 @@ type GetFederationTokenInput struct { // use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2885,11 +2913,11 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -2903,11 +2931,11 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An AWS conversion compresses the passed session policies and session tags - // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags - // for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. 
When you do, session tags override a user @@ -3004,7 +3032,8 @@ func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { } // Contains the response to a successful GetFederationToken request, including -// temporary AWS credentials that can be used to make AWS requests. +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. type GetFederationTokenOutput struct { _ struct{} `type:"structure"` @@ -3062,9 +3091,9 @@ type GetSessionTokenInput struct { // The duration, in seconds, that the credentials should remain valid. Acceptable // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3,600 seconds (one - // hour). If the duration is longer than one hour, the session for AWS account - // owners defaults to one hour. + // for Amazon Web Services account owners are restricted to a maximum of 3,600 + // seconds (one hour). If the duration is longer than one hour, the session + // for Amazon Web Services account owners defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The identification number of the MFA device that is associated with the IAM @@ -3072,7 +3101,7 @@ type GetSessionTokenInput struct { // user has a policy that requires MFA authentication. The value is either the // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the AWS Management Console + // You can find the device for an IAM user by going to the Management Console // and viewing the user's security credentials. // // The regex used to validate this parameter is a string of characters consisting @@ -3139,7 +3168,8 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { } // Contains the response to a successful GetSessionToken request, including -// temporary AWS credentials that can be used to make AWS requests. +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. type GetSessionTokenOutput struct { _ struct{} `type:"structure"` @@ -3174,8 +3204,8 @@ type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session // policy for the role. For more information about ARNs, see Amazon Resource - // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the AWS General Reference. + // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. Arn *string `locationName:"arn" min:"20" type:"string"` } @@ -3210,9 +3240,9 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging AWS STS -// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. +// to control access to resources. 
For more information, see Tagging STS Sessions +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in +// the IAM User Guide. type Tag struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index cb1debbaa4..2d98d92353 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -3,11 +3,11 @@ // Package sts provides the client and types for making API // requests to AWS Security Token Service. // -// AWS Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for AWS Identity and Access Management (IAM) users or for users -// that you authenticate (federated users). This guide provides descriptions -// of the STS API. For more information about using this service, see Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for Identity and Access Management (IAM) users or for users that +// you authenticate (federated users). This guide provides descriptions of the +// STS API. For more information about using this service, see Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index a233f542ef..7897d70c87 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -42,8 +42,9 @@ const ( // ErrCodeInvalidIdentityTokenException for service response error code // "InvalidIdentityToken". // - // The web identity token that was passed could not be validated by AWS. Get - // a new identity token from the identity provider and then retry the request. + // The web identity token that was passed could not be validated by Amazon Web + // Services. Get a new identity token from the identity provider and then retry + // the request. ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" // ErrCodeMalformedPolicyDocumentException for service response error code @@ -57,11 +58,11 @@ const ( // "PackedPolicyTooLarge". // // The request was rejected because the total packed size of the session policies - // and session tags combined was too large. An AWS conversion compresses the - // session policy document, session policy ARNs, and session tags into a packed - // binary format that has a separate limit. The error message indicates by percentage - // how close the policies and tags are to the upper size limit. For more information, - // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // and session tags combined was too large. An Amazon Web Services conversion + // compresses the session policy document, session policy ARNs, and session + // tags into a packed binary format that has a separate limit. The error message + // indicates by percentage how close the policies and tags are to the upper + // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. 
// // You could receive this error even though you meet other defined session policy @@ -76,7 +77,8 @@ const ( // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // and Deactivating Amazon Web Services STS in an Amazon Web Services Region + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. ErrCodeRegionDisabledException = "RegionDisabledException" ) diff --git a/vendor/github.com/dennwc/varint/.gitignore b/vendor/github.com/dennwc/varint/.gitignore new file mode 100644 index 0000000000..9385b6db18 --- /dev/null +++ b/vendor/github.com/dennwc/varint/.gitignore @@ -0,0 +1,2 @@ +*.o +*.txt \ No newline at end of file diff --git a/vendor/github.com/dennwc/varint/.travis.yml b/vendor/github.com/dennwc/varint/.travis.yml new file mode 100644 index 0000000000..b3da258f52 --- /dev/null +++ b/vendor/github.com/dennwc/varint/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.12.x + +env: + - GO111MODULE=on \ No newline at end of file diff --git a/vendor/github.com/dennwc/varint/LICENSE b/vendor/github.com/dennwc/varint/LICENSE new file mode 100644 index 0000000000..8b3f68715c --- /dev/null +++ b/vendor/github.com/dennwc/varint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Denys Smirnov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dennwc/varint/README.md b/vendor/github.com/dennwc/varint/README.md new file mode 100644 index 0000000000..fe15b3b500 --- /dev/null +++ b/vendor/github.com/dennwc/varint/README.md @@ -0,0 +1,47 @@ +# varint + +This package provides an optimized implementation of protobuf's varint encoding/decoding. +It has no dependencies. 
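A minimal usage sketch of the package (an aside, not part of the vendored files), assuming only the exported signatures introduced elsewhere in this diff — `Uvarint`, `UvarintSize`, and `ProtoTag`:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/dennwc/varint"
)

func main() {
	// Encode with the standard library, decode with the optimized implementation.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)

	v, read := varint.Uvarint(buf[:n])
	fmt.Println(v, read)                 // 300 2
	fmt.Println(varint.UvarintSize(300)) // 2, computed without touching a buffer

	// Decode a protobuf tag: field 1, wire type 2 (length-delimited) encodes as 0x0a.
	num, typ, read := varint.ProtoTag([]byte{0x0a})
	fmt.Println(num, typ, read) // 1 2 1
}
```

`Uvarint` mirrors the semantics of `binary.Uvarint`, so it can be swapped in wherever the standard decoder shows up as a hot spot.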
+ +Benchmarks comparing to a `binary.Uvarint`: + +``` +benchmark old ns/op new ns/op delta +BenchmarkUvarint/1-8 4.13 2.85 -30.99% +BenchmarkUvarint/1_large-8 4.01 2.28 -43.14% +BenchmarkUvarint/2-8 6.23 2.87 -53.93% +BenchmarkUvarint/2_large-8 5.60 2.86 -48.93% +BenchmarkUvarint/3-8 6.55 3.44 -47.48% +BenchmarkUvarint/3_large-8 6.54 2.86 -56.27% +BenchmarkUvarint/4-8 7.30 3.71 -49.18% +BenchmarkUvarint/4_large-8 7.46 3.10 -58.45% +BenchmarkUvarint/5-8 8.31 4.12 -50.42% +BenchmarkUvarint/5_large-8 8.56 3.48 -59.35% +BenchmarkUvarint/6-8 9.42 4.66 -50.53% +BenchmarkUvarint/6_large-8 9.91 4.07 -58.93% +BenchmarkUvarint/7-8 10.6 5.28 -50.19% +BenchmarkUvarint/7_large-8 11.0 4.70 -57.27% +BenchmarkUvarint/8-8 11.7 6.02 -48.55% +BenchmarkUvarint/8_large-8 12.1 5.19 -57.11% +BenchmarkUvarint/9-8 12.9 6.83 -47.05% +BenchmarkUvarint/9_large-8 13.1 5.71 -56.41% +``` + +It also provides additional functionality like `UvarintSize` (similar to `sov*` in `gogo/protobuf`): + +``` +benchmark old ns/op new ns/op delta +BenchmarkUvarintSize/1-8 1.71 0.43 -74.85% +BenchmarkUvarintSize/2-8 2.56 0.57 -77.73% +BenchmarkUvarintSize/3-8 3.22 0.72 -77.64% +BenchmarkUvarintSize/4-8 3.74 0.72 -80.75% +BenchmarkUvarintSize/5-8 4.29 0.57 -86.71% +BenchmarkUvarintSize/6-8 4.85 0.58 -88.04% +BenchmarkUvarintSize/7-8 5.43 0.71 -86.92% +BenchmarkUvarintSize/8-8 6.01 0.86 -85.69% +BenchmarkUvarintSize/9-8 6.64 1.00 -84.94% +``` + +# License + +MIT \ No newline at end of file diff --git a/vendor/github.com/dennwc/varint/go.mod b/vendor/github.com/dennwc/varint/go.mod new file mode 100644 index 0000000000..f7883d4fed --- /dev/null +++ b/vendor/github.com/dennwc/varint/go.mod @@ -0,0 +1,3 @@ +module github.com/dennwc/varint + +go 1.12 diff --git a/vendor/github.com/dennwc/varint/proto.go b/vendor/github.com/dennwc/varint/proto.go new file mode 100644 index 0000000000..e3b458547f --- /dev/null +++ b/vendor/github.com/dennwc/varint/proto.go @@ -0,0 +1,244 @@ +package varint + +// ProtoTag decodes a protobuf's field number and wire type pair +// from buf and returns that value and the number of bytes read (> 0). +// If an error occurred, n = 0 is returned. +func ProtoTag(buf []byte) (num int, typ byte, n int) { + // Same unrolled implementation as in Uvarint. + // + // But this time we can check if the wire type and field num + // are valid when reading the first byte. + // + // Also, the swifts are now different, because first 3 bits + // are for the wire type. + // + // The implementation will stop at 9 bytes, returning an error. 
+ sz := len(buf) + if sz == 0 { + return 0, 0, 0 + } + const ( + bit = 1 << 7 + mask = bit - 1 + step = 7 + + // protobuf + typBits = 3 + typMask = 1<<3 - 1 + ) + if sz >= 9 { // no bound checks + // i == 0 + b := buf[0] + if b == 0 { + return 0, 0, 0 + } + typ = b & typMask + if typ > 5 { + return 0, 0, 0 + } + if b < bit { + num = int(b >> typBits) + if num == 0 { + return 0, 0, 0 + } + n = 1 + return + } + num = int((b & mask) >> typBits) + var s uint = step - typBits + + // i == 1 + b = buf[1] + if b < bit { + num |= int(b) << s + n = 2 + return + } + num |= int(b&mask) << s + s += step + + // i == 2 + b = buf[2] + if b < bit { + num |= int(b) << s + n = 3 + return + } + num |= int(b&mask) << s + s += step + + // i == 3 + b = buf[3] + if b < bit { + num |= int(b) << s + n = 4 + return + } + num |= int(b&mask) << s + s += step + + // i == 4 + b = buf[4] + if b < bit { + num |= int(b) << s + n = 5 + return + } + num |= int(b&mask) << s + s += step + + // i == 5 + b = buf[5] + if b < bit { + num |= int(b) << s + n = 6 + return + } + num |= int(b&mask) << s + s += step + + // i == 6 + b = buf[6] + if b < bit { + num |= int(b) << s + n = 7 + return + } + num |= int(b&mask) << s + s += step + + // i == 7 + b = buf[7] + if b < bit { + num |= int(b) << s + n = 8 + return + } + num |= int(b&mask) << s + s += step + + // i == 8 + b = buf[8] + if b < bit { + num |= int(b) << s + n = 9 + return + } + return 0, 0, 0 // too much + } + + // i == 0 + b := buf[0] + if b == 0 { + return 0, 0, 0 + } + typ = b & typMask + if typ > 5 { + return 0, 0, 0 + } + if b < bit { + num = int(b >> typBits) + if num == 0 { + return 0, 0, 0 + } + n = 1 + return + } else if sz == 1 { + return 0, 0, 0 + } + num = int((b & mask) >> typBits) + var s uint = step - typBits + + // i == 1 + b = buf[1] + if b < bit { + num |= int(b) << s + n = 2 + return + } else if sz == 2 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 2 + b = buf[2] + if b < bit { + num |= int(b) << s + n = 3 + return + } else if sz == 3 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 3 + b = buf[3] + if b < bit { + num |= int(b) << s + n = 4 + return + } else if sz == 4 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 4 + b = buf[4] + if b < bit { + num |= int(b) << s + n = 5 + return + } else if sz == 5 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 5 + b = buf[5] + if b < bit { + num |= int(b) << s + n = 6 + return + } else if sz == 6 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 6 + b = buf[6] + if b < bit { + num |= int(b) << s + n = 7 + return + } else if sz == 7 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 7 + b = buf[7] + if b < bit { + num |= int(b) << s + n = 8 + return + } else if sz == 8 { + return 0, 0, 0 + } + num |= int(b&mask) << s + s += step + + // i == 8 + b = buf[8] + if b < bit { + num |= int(b) << s + n = 9 + return + } + return 0, 0, 0 // too much +} diff --git a/vendor/github.com/dennwc/varint/varint.go b/vendor/github.com/dennwc/varint/varint.go new file mode 100644 index 0000000000..83278c2d7d --- /dev/null +++ b/vendor/github.com/dennwc/varint/varint.go @@ -0,0 +1,270 @@ +package varint + +const maxUint64 = uint64(1<<64 - 1) + +// MaxLenN is the maximum length of a varint-encoded N-bit integer. +const ( + MaxLen8 = 2 + MaxLen16 = 3 + MaxLen32 = 5 + MaxLen64 = 10 +) + +// MaxValN is the maximum varint-encoded integer that fits in N bytes. 
+const ( + MaxVal9 = maxUint64 >> (1 + iota*7) + MaxVal8 + MaxVal7 + MaxVal6 + MaxVal5 + MaxVal4 + MaxVal3 + MaxVal2 + MaxVal1 +) + +// UvarintSize returns the number of bytes necessary to encode a given uint. +func UvarintSize(x uint64) int { + if x <= MaxVal4 { + if x <= MaxVal1 { + return 1 + } else if x <= MaxVal2 { + return 2 + } else if x <= MaxVal3 { + return 3 + } + return 4 + } + if x <= MaxVal5 { + return 5 + } else if x <= MaxVal6 { + return 6 + } else if x <= MaxVal7 { + return 7 + } else if x <= MaxVal8 { + return 8 + } else if x <= MaxVal9 { + return 9 + } + return 10 +} + +// Uvarint decodes a uint64 from buf and returns that value and the +// number of bytes read (> 0). If an error occurred, the value is 0 +// and the number of bytes n is <= 0 meaning: +// +// n == 0: buf too small +// n < 0: value larger than 64 bits (overflow) +// and -n is the number of bytes read +// +func Uvarint(buf []byte) (uint64, int) { + // Fully unrolled implementation of binary.Uvarint. + // + // It will also eliminate bound checks for buffers larger than 9 bytes. + sz := len(buf) + if sz == 0 { + return 0, 0 + } + const ( + step = 7 + bit = 1 << 7 + mask = bit - 1 + ) + if sz >= 10 { // no bound checks + // i == 0 + b := buf[0] + if b < bit { + return uint64(b), 1 + } + x := uint64(b & mask) + var s uint = step + + // i == 1 + b = buf[1] + if b < bit { + return x | uint64(b)< 1 { + return 0, -10 // overflow + } + return x | uint64(b)< 1 { + return 0, -10 // overflow + } + return x | uint64(b)< +Eric Buth Google Inc. Jan Mercl <0xjnml@gmail.com> Klaus Post diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index d9914732b5..ea6524ddd0 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -26,7 +26,9 @@ # Please keep the list sorted. +Alex Legg Damian Gryski +Eric Buth Jan Mercl <0xjnml@gmail.com> Jonathan Swinney Kai Backman diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index f1e04b172c..23c6e26c6b 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -118,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -153,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). 
if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -165,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -186,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -213,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -230,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s index bf83667d71..f8d54adfc5 100644 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -382,7 +382,7 @@ inner0: // if load32(src, s) != load32(src, candidate) { continue } break MOVW 0(R7), R3 - MOVW (R6)(R15*1), R4 + MOVW (R6)(R15), R4 CMPW R4, R3 BNE inner0 @@ -672,7 +672,7 @@ inlineEmitCopyEnd: MOVHU R3, 0(R17)(R11<<1) // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15*1), R4 + MOVW (R6)(R15), R4 CMPW R4, R14 BEQ inner1 diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 92e422eed7..35eea9f106 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,11 +1,10 @@ # gorilla/mux [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) [![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) +![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) https://www.gorillatoolkit.org/pkg/mux @@ -26,6 +25,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv * [Examples](#examples) * [Matching Routes](#matching-routes) * [Static Files](#static-files) +* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) * [Registered URLs](#registered-urls) * [Walking Routes](#walking-routes) * [Graceful Shutdown](#graceful-shutdown) @@ -212,6 +212,93 @@ func main() { } ``` +### Serving Single Page Applications + +Most of the time it makes sense to serve your SPA on a separate web server from your API, +but sometimes it's desirable to serve them both from one place. It's possible to write a simple +handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage +mux's powerful routing for your API endpoints. + +```go +package main + +import ( + "encoding/json" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gorilla/mux" +) + +// spaHandler implements the http.Handler interface, so we can use it +// to respond to HTTP requests. The path to the static directory and +// path to the index file within that static directory are used to +// serve the SPA in the given static directory. +type spaHandler struct { + staticPath string + indexPath string +} + +// ServeHTTP inspects the URL path to locate a file within the static dir +// on the SPA handler. If a file is found, it will be served. If not, the +// file located at the index path on the SPA handler will be served. This +// is suitable behavior for serving an SPA (single page application). 
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // get the absolute path to prevent directory traversal + path, err := filepath.Abs(r.URL.Path) + if err != nil { + // if we failed to get the absolute path respond with a 400 bad request + // and stop + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // prepend the path with the path to the static directory + path = filepath.Join(h.staticPath, path) + + // check whether a file exists at the given path + _, err = os.Stat(path) + if os.IsNotExist(err) { + // file does not exist, serve index.html + http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) + return + } else if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // otherwise, use http.FileServer to serve the static dir + http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) +} + +func main() { + router := mux.NewRouter() + + router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { + // an example API handler + json.NewEncoder(w).Encode(map[string]bool{"ok": true}) + }) + + spa := spaHandler{staticPath: "build", indexPath: "index.html"} + router.PathPrefix("/").Handler(spa) + + srv := &http.Server{ + Handler: router, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + ### Registered URLs Now let's see how to build registered URLs. diff --git a/vendor/github.com/gorilla/mux/context.go b/vendor/github.com/gorilla/mux/context.go deleted file mode 100644 index 665940a268..0000000000 --- a/vendor/github.com/gorilla/mux/context.go +++ /dev/null @@ -1,18 +0,0 @@ -package mux - -import ( - "context" - "net/http" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return r.Context().Value(key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - return r.WithContext(context.WithValue(r.Context(), key, val)) -} diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod index cfc8ede581..df170a3994 100644 --- a/vendor/github.com/gorilla/mux/go.mod +++ b/vendor/github.com/gorilla/mux/go.mod @@ -1 +1,3 @@ module github.com/gorilla/mux + +go 1.12 diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go index cf2b26dc03..cb51c565eb 100644 --- a/vendor/github.com/gorilla/mux/middleware.go +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -58,22 +58,17 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc { func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { var allMethods []string - err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { - for _, m := range route.matchers { - if _, ok := m.(*routeRegexp); ok { - if m.Match(req, &RouteMatch{}) { - methods, err := route.GetMethods() - if err != nil { - return err - } - - allMethods = append(allMethods, methods...) - } - break + for _, route := range r.routes { + var match RouteMatch + if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { + methods, err := route.GetMethods() + if err != nil { + return nil, err } + + allMethods = append(allMethods, methods...) 
} - return nil - }) + } - return allMethods, err + return allMethods, nil } diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index a2cd193e48..782a34b22a 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -5,6 +5,7 @@ package mux import ( + "context" "errors" "fmt" "net/http" @@ -58,8 +59,7 @@ type Router struct { // If true, do not clear the request context after handling the request. // - // Deprecated: No effect when go1.7+ is used, since the context is stored - // on the request itself. + // Deprecated: No effect, since the context is stored on the request itself. KeepContext bool // Slice of middlewares to be called after a match is found @@ -111,10 +111,8 @@ func copyRouteConf(r routeConf) routeConf { c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) } - c.matchers = make([]matcher, 0, len(r.matchers)) - for _, m := range r.matchers { - c.matchers = append(c.matchers, m) - } + c.matchers = make([]matcher, len(r.matchers)) + copy(c.matchers, r.matchers) return c } @@ -197,8 +195,8 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { var handler http.Handler if r.Match(req, &match) { handler = match.Handler - req = setVars(req, match.Vars) - req = setCurrentRoute(req, match.Route) + req = requestWithVars(req, match.Vars) + req = requestWithRoute(req, match.Route) } if handler == nil && match.MatchErr == ErrMethodMismatch { @@ -428,7 +426,7 @@ const ( // Vars returns the route variables for the current request, if any. func Vars(r *http.Request) map[string]string { - if rv := contextGet(r, varsKey); rv != nil { + if rv := r.Context().Value(varsKey); rv != nil { return rv.(map[string]string) } return nil @@ -437,21 +435,22 @@ func Vars(r *http.Request) map[string]string { // CurrentRoute returns the matched route for the current request, if any. // This only works when called inside the handler of the matched route // because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. +// after the handler returns. 
func CurrentRoute(r *http.Request) *Route { - if rv := contextGet(r, routeKey); rv != nil { + if rv := r.Context().Value(routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) *http.Request { - return contextSet(r, varsKey, val) +func requestWithVars(r *http.Request, vars map[string]string) *http.Request { + ctx := context.WithValue(r.Context(), varsKey, vars) + return r.WithContext(ctx) } -func setCurrentRoute(r *http.Request, val interface{}) *http.Request { - return contextSet(r, routeKey, val) +func requestWithRoute(r *http.Request, route *Route) *http.Request { + ctx := context.WithValue(r.Context(), routeKey, route) + return r.WithContext(ctx) } // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index ac1abcd473..0144842bb2 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -181,21 +181,21 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { } } return r.regexp.MatchString(host) - } else { - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) } + + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) } // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { @@ -230,14 +230,51 @@ func (r *routeRegexp) getURLQuery(req *http.Request) string { return "" } templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } + val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) + if ok { + return templateKey + "=" + val } return "" } +// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. +// If key was not found, empty string and false is returned. +func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { + query := []byte(rawQuery) + for len(query) > 0 { + foundKey := query + if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { + foundKey, query = foundKey[:i], foundKey[i+1:] + } else { + query = query[:0] + } + if len(foundKey) == 0 { + continue + } + var value []byte + if i := bytes.IndexByte(foundKey, '='); i >= 0 { + foundKey, value = foundKey[:i], foundKey[i+1:] + } + if len(foundKey) < len(key) { + // Cannot possibly be key. + continue + } + keyString, err := url.QueryUnescape(string(foundKey)) + if err != nil { + continue + } + if keyString != key { + continue + } + valueString, err := url.QueryUnescape(string(value)) + if err != nil { + continue + } + return valueString, true + } + return "", false +} + func (r *routeRegexp) matchQueryString(req *http.Request) bool { return r.regexp.MatchString(r.getURLQuery(req)) } @@ -288,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. 
if v.host != nil { host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } matches := v.host.regexp.FindStringSubmatchIndex(host) if len(matches) > 0 { extractVars(host, matches, v.host.varsN, m.Vars) diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 8479c68c1d..750afe570d 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -74,7 +74,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { return false } - if match.MatchErr == ErrMethodMismatch { + if match.MatchErr == ErrMethodMismatch && r.handler != nil { // We found a route which matches request method, clear MatchErr match.MatchErr = nil // Then override the mis-matched handler @@ -412,11 +412,30 @@ func (r *Route) Queries(pairs ...string) *Route { type schemeMatcher []string func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) + scheme := r.URL.Scheme + // https://golang.org/pkg/net/http/#Request + // "For [most] server requests, fields other than Path and RawQuery will be + // empty." + // Since we're an http muxer, the scheme is either going to be http or https + // though, so we can just set it based on the tls termination state. + if scheme == "" { + if r.TLS == nil { + scheme = "http" + } else { + scheme = "https" + } + } + return matchInArray(m, scheme) } // Schemes adds a matcher for URL schemes. // It accepts a sequence of schemes to be matched, e.g.: "http", "https". +// If the request's URL has a scheme set, it will be matched against. +// Generally, the URL scheme will only be set if a previous handler set it, +// such as the ProxyHeaders handler from gorilla/handlers. +// If unset, the scheme will be determined based on the request's TLS +// termination state. +// The first argument to Schemes will be used when constructing a route URL. func (r *Route) Schemes(schemes ...string) *Route { for k, v := range schemes { schemes[k] = strings.ToLower(v) @@ -493,8 +512,8 @@ func (r *Route) Subrouter() *Router { // This also works for host variables: // // r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). // Name("article") // // // url.String() will be "http://news.domain.com/articles/technology/42" @@ -502,6 +521,13 @@ func (r *Route) Subrouter() *Router { // "category", "technology", // "id", "42") // +// The scheme of the resulting url will be the first argument that was passed to Schemes: +// +// // url.String() will be "https://example.com" +// r := mux.NewRouter() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() +// // All variables defined in the route are required, and their values must // conform to the corresponding patterns. 
func (r *Route) URL(pairs ...string) (*url.URL, error) { @@ -635,7 +661,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) { if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } - var queries []string + queries := make([]string, 0, len(r.regexp.queries)) for _, query := range r.regexp.queries { queries = append(queries, query.regexp.String()) } @@ -654,7 +680,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) { if r.regexp.queries == nil { return nil, errors.New("mux: route doesn't have queries") } - var queries []string + queries := make([]string, 0, len(r.regexp.queries)) for _, query := range r.regexp.queries { queries = append(queries, query.template) } diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go index 32ecffde48..5f5c496de0 100644 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -15,5 +15,5 @@ import "net/http" // can be set by making a route that captures the required variables, // starting a server and sending the request to that server. func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return setVars(r, val) + return requestWithVars(r, val) } diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go index 7453feb8a5..d94c2807a7 100644 --- a/vendor/github.com/hashicorp/consul/api/acl.go +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -270,16 +270,61 @@ type ACLAuthMethodNamespaceRule struct { type ACLAuthMethodListEntry struct { Name string Type string - DisplayName string `json:",omitempty"` - Description string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + DisplayName string `json:",omitempty"` + Description string `json:",omitempty"` + MaxTokenTTL time.Duration `json:",omitempty"` + + // TokenLocality defines the kind of token that this auth method produces. + // This can be either 'local' or 'global'. If empty 'local' is assumed. + TokenLocality string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 // Namespace is the namespace the ACLAuthMethodListEntry is associated with. // Namespacing is a Consul Enterprise feature. Namespace string `json:",omitempty"` } +// This is nearly identical to the ACLAuthMethod MarshalJSON +func (m *ACLAuthMethodListEntry) MarshalJSON() ([]byte, error) { + type Alias ACLAuthMethodListEntry + exported := &struct { + MaxTokenTTL string `json:",omitempty"` + *Alias + }{ + MaxTokenTTL: m.MaxTokenTTL.String(), + Alias: (*Alias)(m), + } + if m.MaxTokenTTL == 0 { + exported.MaxTokenTTL = "" + } + + return json.Marshal(exported) +} + +// This is nearly identical to the ACLAuthMethod UnmarshalJSON +func (m *ACLAuthMethodListEntry) UnmarshalJSON(data []byte) error { + type Alias ACLAuthMethodListEntry + aux := &struct { + MaxTokenTTL string + *Alias + }{ + Alias: (*Alias)(m), + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + var err error + if aux.MaxTokenTTL != "" { + if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil { + return err + } + } + + return nil +} + // ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed // KubernetesAuthMethodConfig. 
func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { @@ -404,7 +449,7 @@ func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken @@ -425,7 +470,7 @@ func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out struct{ ID string } @@ -446,7 +491,7 @@ func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -462,7 +507,7 @@ func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -478,7 +523,7 @@ func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out struct{ ID string } @@ -498,7 +543,7 @@ func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -524,7 +569,7 @@ func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -545,7 +590,7 @@ func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, e if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -568,7 +613,7 @@ func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken @@ -593,7 +638,7 @@ func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken @@ -620,7 +665,7 @@ func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (* if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken @@ -640,7 +685,7 @@ func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -655,7 +700,7 @@ func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -679,7 +724,7 @@ func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -702,7 +747,7 @@ func (a *ACL) TokenList(q *QueryOptions) 
([]*ACLTokenListEntry, *QueryMeta, erro if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -728,7 +773,7 @@ func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLPolicy @@ -753,7 +798,7 @@ func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *Wri if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLPolicy @@ -772,7 +817,7 @@ func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -786,7 +831,7 @@ func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -808,7 +853,7 @@ func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -835,7 +880,7 @@ func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, er if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -855,11 +900,12 @@ func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, er func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { r := a.c.newRequest("POST", "/v1/acl/rules/translate") r.body = rules + r.header.Set("Content-Type", "text/plain") rtt, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return "", err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) qm.RequestTime = rtt @@ -883,7 +929,7 @@ func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { if err != nil { return "", err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) qm.RequestTime = rtt @@ -910,7 +956,7 @@ func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLRole @@ -935,7 +981,7 @@ func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLRole @@ -954,7 +1000,7 @@ func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -968,7 +1014,7 @@ func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, er if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -994,7 +1040,7 @@ func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *Query if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} 
parseQueryMeta(resp, qm) @@ -1022,7 +1068,7 @@ func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -1048,7 +1094,7 @@ func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuth if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLAuthMethod @@ -1072,7 +1118,7 @@ func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuth if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLAuthMethod @@ -1095,7 +1141,7 @@ func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -1113,7 +1159,7 @@ func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -1141,7 +1187,7 @@ func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *Query if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -1169,7 +1215,7 @@ func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBind if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLBindingRule @@ -1194,7 +1240,7 @@ func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBind if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLBindingRule @@ -1213,7 +1259,7 @@ func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMe if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -1227,7 +1273,7 @@ func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindin if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -1256,7 +1302,7 @@ func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBinding if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -1279,7 +1325,7 @@ func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMet if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken @@ -1297,7 +1343,7 @@ func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -1317,7 +1363,7 @@ func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string, if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out aclOIDCAuthURLResponse @@ -1352,7 +1398,7 @@ func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q 
*WriteOptions) (*ACLTo if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out ACLToken diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index a4cc143f05..ec144c7f7f 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -3,6 +3,7 @@ package api import ( "bufio" "bytes" + "context" "fmt" "io" "net/http" @@ -62,6 +63,7 @@ type AgentCheck struct { ServiceID string ServiceName string Type string + ExposedPort int Definition HealthCheckDefinition Namespace string `json:",omitempty"` } @@ -81,6 +83,7 @@ type AgentService struct { Meta map[string]string Port int Address string + SocketPath string TaggedAddresses map[string]ServiceAddress `json:",omitempty"` Weights AgentWeights EnableTagOverride bool @@ -113,14 +116,17 @@ type AgentServiceConnect struct { // AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy // ServiceDefinition or response. type AgentServiceConnectProxyConfig struct { - DestinationServiceName string `json:",omitempty"` - DestinationServiceID string `json:",omitempty"` - LocalServiceAddress string `json:",omitempty"` - LocalServicePort int `json:",omitempty"` - Config map[string]interface{} `json:",omitempty" bexpr:"-"` - Upstreams []Upstream `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty"` - Expose ExposeConfig `json:",omitempty"` + DestinationServiceName string `json:",omitempty"` + DestinationServiceID string `json:",omitempty"` + LocalServiceAddress string `json:",omitempty"` + LocalServicePort int `json:",omitempty"` + LocalServiceSocketPath string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` + Upstreams []Upstream `json:",omitempty"` + MeshGateway MeshGatewayConfig `json:",omitempty"` + Expose ExposeConfig `json:",omitempty"` } const ( @@ -266,12 +272,23 @@ type AgentServiceRegistration struct { Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` } -//ServiceRegisterOpts is used to pass extra options to the service register. +// ServiceRegisterOpts is used to pass extra options to the service register. type ServiceRegisterOpts struct { //Missing healthchecks will be deleted from the agent. //Using this parameter allows to idempotently register a service and its checks without //having to manually deregister checks. ReplaceExistingChecks bool + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use WithContext() to set the context. + ctx context.Context +} + +// WithContext sets the context to be used for the request on a new ServiceRegisterOpts, +// and returns the opts. 
+func (o ServiceRegisterOpts) WithContext(ctx context.Context) ServiceRegisterOpts { + o.ctx = ctx + return o } // AgentCheckRegistration is used to register a new check @@ -301,6 +318,7 @@ type AgentServiceCheck struct { TCP string `json:",omitempty"` Status string `json:",omitempty"` Notes string `json:",omitempty"` + TLSServerName string `json:",omitempty"` TLSSkipVerify bool `json:",omitempty"` GRPC string `json:",omitempty"` GRPCUseTLS bool `json:",omitempty"` @@ -392,8 +410,11 @@ type Upstream struct { Datacenter string `json:",omitempty"` LocalBindAddress string `json:",omitempty"` LocalBindPort int `json:",omitempty"` + LocalBindSocketPath string `json:",omitempty"` + LocalBindSocketMode string `json:",omitempty"` Config map[string]interface{} `json:",omitempty" bexpr:"-"` MeshGateway MeshGatewayConfig `json:",omitempty"` + CentrallyConfigured bool `json:",omitempty" bexpr:"-"` } // Agent can be used to query the Agent endpoints @@ -417,7 +438,7 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out map[string]map[string]interface{} if err := decodeBody(resp, &out); err != nil { @@ -435,7 +456,7 @@ func (a *Agent) Host() (map[string]interface{}, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out map[string]interface{} if err := decodeBody(resp, &out); err != nil { @@ -452,7 +473,7 @@ func (a *Agent) Metrics() (*MetricsInfo, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out *MetricsInfo if err := decodeBody(resp, &out); err != nil { @@ -468,7 +489,7 @@ func (a *Agent) Reload() error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -494,13 +515,20 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) { // ChecksWithFilter returns a subset of the locally registered checks that match // the given filter expression func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { + return a.ChecksWithFilterOpts(filter, nil) +} + +// ChecksWithFilterOpts returns a subset of the locally registered checks that match +// the given filter expression and QueryOptions. +func (a *Agent) ChecksWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentCheck, error) { r := a.c.newRequest("GET", "/v1/agent/checks") + r.setQueryOptions(q) r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out map[string]*AgentCheck if err := decodeBody(resp, &out); err != nil { @@ -517,13 +545,20 @@ func (a *Agent) Services() (map[string]*AgentService, error) { // ServicesWithFilter returns a subset of the locally registered services that match // the given filter expression func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { + return a.ServicesWithFilterOpts(filter, nil) +} + +// ServicesWithFilterOpts returns a subset of the locally registered services that match +// the given filter expression and QueryOptions. 
+func (a *Agent) ServicesWithFilterOpts(filter string, q *QueryOptions) (map[string]*AgentService, error) { r := a.c.newRequest("GET", "/v1/agent/services") + r.setQueryOptions(q) r.filterQuery(filter) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out map[string]*AgentService if err := decodeBody(resp, &out); err != nil { @@ -546,7 +581,7 @@ func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceC if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) // Service not Found if resp.StatusCode == http.StatusNotFound { return HealthCritical, nil, nil @@ -580,7 +615,7 @@ func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentService if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) // Service not Found if resp.StatusCode == http.StatusNotFound { return HealthCritical, nil, nil @@ -613,7 +648,7 @@ func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *Quer if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -638,7 +673,7 @@ func (a *Agent) Members(wan bool) ([]*AgentMember, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out []*AgentMember if err := decodeBody(resp, &out); err != nil { @@ -660,7 +695,7 @@ func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out []*AgentMember if err := decodeBody(resp, &out); err != nil { @@ -688,6 +723,7 @@ func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts Serv func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error { r := a.c.newRequest("PUT", "/v1/agent/service/register") r.obj = service + r.ctx = opts.ctx if opts.ReplaceExistingChecks { r.params.Set("replace-existing-checks", "true") } @@ -695,7 +731,7 @@ func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceR if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -707,7 +743,20 @@ func (a *Agent) ServiceDeregister(serviceID string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) + return nil +} + +// ServiceDeregisterOpts is used to deregister a service with +// the local agent with QueryOptions. +func (a *Agent) ServiceDeregisterOpts(serviceID string, q *QueryOptions) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + r.setQueryOptions(q) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + closeResponseBody(resp) return nil } @@ -762,7 +811,7 @@ func (a *Agent) updateTTL(checkID, note, status string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -785,6 +834,10 @@ type checkUpdate struct { // strings for compatibility (though a newer version of Consul will still be // required to use this API). 
func (a *Agent) UpdateTTL(checkID, output, status string) error { + return a.UpdateTTLOpts(checkID, output, status, nil) +} + +func (a *Agent) UpdateTTLOpts(checkID, output, status string, q *QueryOptions) error { switch status { case "pass", HealthPassing: status = HealthPassing @@ -798,6 +851,7 @@ func (a *Agent) UpdateTTL(checkID, output, status string) error { endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) r := a.c.newRequest("PUT", endpoint) + r.setQueryOptions(q) r.obj = &checkUpdate{ Status: status, Output: output, @@ -807,7 +861,7 @@ func (a *Agent) UpdateTTL(checkID, output, status string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -820,19 +874,26 @@ func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } // CheckDeregister is used to deregister a check with // the local agent func (a *Agent) CheckDeregister(checkID string) error { + return a.CheckDeregisterOpts(checkID, nil) +} + +// CheckDeregisterOpts is used to deregister a check with +// the local agent using query options +func (a *Agent) CheckDeregisterOpts(checkID string, q *QueryOptions) error { r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + r.setQueryOptions(q) _, resp, err := requireOK(a.c.doRequest(r)) if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -847,7 +908,7 @@ func (a *Agent) Join(addr string, wan bool) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -858,7 +919,7 @@ func (a *Agent) Leave() error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -869,7 +930,7 @@ func (a *Agent) ForceLeave(node string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -882,7 +943,7 @@ func (a *Agent) ForceLeavePrune(node string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -895,7 +956,7 @@ func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, e if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out AgentAuthorize if err := decodeBody(resp, &out); err != nil { @@ -912,7 +973,7 @@ func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -933,7 +994,7 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *Qu if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -956,7 +1017,7 @@ func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -969,7 +1030,7 @@ func (a *Agent) DisableServiceMaintenance(serviceID string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -983,7 +1044,7 @@ func (a *Agent) EnableNodeMaintenance(reason string) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -996,7 +1057,7 @@ func (a *Agent) DisableNodeMaintenance() error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -1027,7 +1088,7 @@ func 
(a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q } logCh := make(chan string, 64) go func() { - defer resp.Body.Close() + defer closeResponseBody(resp) scanner := bufio.NewScanner(resp.Body) for { select { @@ -1135,7 +1196,7 @@ func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMe if err != nil { return nil, 0, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 08f00c4069..a35980a9a1 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -14,6 +14,7 @@ import ( "os" "strconv" "strings" + "sync" "time" "github.com/hashicorp/go-cleanhttp" @@ -548,9 +549,48 @@ func (c *Config) GenerateEnv() []string { // Client provides a client to the Consul API type Client struct { + modifyLock sync.RWMutex + headers http.Header + config Config } +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + // NewClient returns a new client func NewClient(config *Config) (*Client, error) { // bootstrap the config @@ -640,7 +680,7 @@ func NewClient(config *Config) (*Client, error) { config.Token = defConfig.Token } - return &Client{config: *config}, nil + return &Client{config: *config, headers: make(http.Header)}, nil } // NewHttpClient returns an http client configured with the given Transport and TLS @@ -831,6 +871,12 @@ func (r *request) toHTTP() (*http.Request, error) { req.Host = r.url.Host req.Header = r.header + // Content-Type must always be set when a body is present + // See https://github.com/hashicorp/consul/issues/10011 + if req.Body != nil && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + // Setup auth if r.config.HttpAuth != nil { req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) @@ -853,8 +899,9 @@ func (c *Client) newRequest(method, path string) *request { Path: path, }, params: make(map[string][]string), - header: make(http.Header), + header: c.Headers(), } + if c.config.Datacenter != "" { r.params.Set("dc", c.config.Datacenter) } @@ -892,7 +939,7 @@ func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*Quer if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -914,7 +961,7 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} if out != nil { @@ -1008,7 +1055,7 @@ func encodeBody(obj interface{}) 
(io.Reader, error) { func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { if e != nil { if resp != nil { - resp.Body.Close() + closeResponseBody(resp) } return d, nil, e } @@ -1018,6 +1065,14 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h return d, resp, nil } +// closeResponseBody reads resp.Body until EOF, and then closes it. The read +// is necessary to ensure that the http.Client's underlying RoundTripper is able +// to re-use the TCP connection. See godoc on net/http.Client.Do. +func closeResponseBody(resp *http.Response) error { + _, _ = io.Copy(ioutil.Discard, resp.Body) + return resp.Body.Close() +} + func (req *request) filterQuery(filter string) { if filter == "" { return @@ -1032,14 +1087,14 @@ func (req *request) filterQuery(filter string) { func generateUnexpectedResponseCodeError(resp *http.Response) error { var buf bytes.Buffer io.Copy(&buf, resp.Body) - resp.Body.Close() + closeResponseBody(resp) return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) } func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) { if e != nil { if resp != nil { - resp.Body.Close() + closeResponseBody(resp) } return false, d, nil, e } diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index 607d5d065c..b8588d8285 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -122,7 +122,7 @@ func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMet if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -138,7 +138,7 @@ func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*Wr if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -153,7 +153,7 @@ func (c *Catalog) Datacenters() ([]string, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out []string if err := decodeBody(resp, &out); err != nil { @@ -170,7 +170,7 @@ func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -191,7 +191,7 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -248,7 +248,7 @@ func (c *Catalog) service(service string, tags []string, q *QueryOptions, connec if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -269,7 +269,7 @@ func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -293,7 +293,7 @@ func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeSer if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -314,7 +314,7 @@ func (c *Catalog) GatewayServices(gateway string, q *QueryOptions) 
([]*GatewaySe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index f5ef60e294..e28c7dc181 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -21,8 +21,10 @@ const ( IngressGateway string = "ingress-gateway" TerminatingGateway string = "terminating-gateway" ServiceIntentions string = "service-intentions" + MeshConfig string = "mesh" ProxyConfigGlobal string = "global" + MeshConfigMesh string = "mesh" ) type ConfigEntry interface { @@ -46,8 +48,8 @@ const ( // should be direct and not flow through a mesh gateway. MeshGatewayModeNone MeshGatewayMode = "none" - // MeshGatewayModeLocal represents that the Upstrea Connect connections - // should be made to a mesh gateway in the local datacenter. This is + // MeshGatewayModeLocal represents that the Upstream Connect connections + // should be made to a mesh gateway in the local datacenter. MeshGatewayModeLocal MeshGatewayMode = "local" // MeshGatewayModeRemote represents that the Upstream Connect connections @@ -62,6 +64,33 @@ type MeshGatewayConfig struct { Mode MeshGatewayMode `json:",omitempty"` } +type ProxyMode string + +const ( + // ProxyModeDefault represents no specific mode and should + // be used to indicate that a different layer of the configuration + // chain should take precedence + ProxyModeDefault ProxyMode = "" + + // ProxyModeTransparent represents that inbound and outbound application + // traffic is being captured and redirected through the proxy. + ProxyModeTransparent ProxyMode = "transparent" + + // ProxyModeDirect represents that the proxy's listeners must be dialed directly + // by the local application and other proxies. + ProxyModeDirect ProxyMode = "direct" +) + +type TransparentProxyConfig struct { + // The port of the listener where outbound application traffic is being redirected to. + OutboundListenerPort int `json:",omitempty" alias:"outbound_listener_port"` + + // DialedDirectly indicates whether transparent proxies can dial this proxy instance directly. + // The discovery chain is not considered when dialing a service instance directly. + // This setting is useful when addressing stateful services, such as a database cluster with a leader node. + DialedDirectly bool `json:",omitempty" alias:"dialed_directly"` +} + // ExposeConfig describes HTTP paths to expose through Envoy outside of Connect. // Users can expose individual paths and/or all HTTP/GRPC paths for checks. type ExposeConfig struct { @@ -91,14 +120,100 @@ type ExposePath struct { ParsedFromCheck bool } +type UpstreamConfiguration struct { + // Overrides is a slice of per-service configuration. The name field is + // required. + Overrides []*UpstreamConfig `json:",omitempty"` + + // Defaults contains default configuration for all upstreams of a given + // service. The name field must be empty. + Defaults *UpstreamConfig `json:",omitempty"` +} + +type UpstreamConfig struct { + // Name is only accepted within a service-defaults config entry. + Name string `json:",omitempty"` + // Namespace is only accepted within a service-defaults config entry. + Namespace string `json:",omitempty"` + + // EnvoyListenerJSON is a complete override ("escape hatch") for the upstream's + // listener. 
+ // + // Note: This escape hatch is NOT compatible with the discovery chain and + // will be ignored if a discovery chain is active. + EnvoyListenerJSON string `json:",omitempty" alias:"envoy_listener_json"` + + // EnvoyClusterJSON is a complete override ("escape hatch") for the upstream's + // cluster. The Connect client TLS certificate and context will be injected + // overriding any TLS settings present. + // + // Note: This escape hatch is NOT compatible with the discovery chain and + // will be ignored if a discovery chain is active. + EnvoyClusterJSON string `json:",omitempty" alias:"envoy_cluster_json"` + + // Protocol describes the upstream's service protocol. Valid values are "tcp", + // "http" and "grpc". Anything else is treated as tcp. The enables protocol + // aware features like per-request metrics and connection pooling, tracing, + // routing etc. + Protocol string `json:",omitempty"` + + // ConnectTimeoutMs is the number of milliseconds to timeout making a new + // connection to this upstream. Defaults to 5000 (5 seconds) if not set. + ConnectTimeoutMs int `json:",omitempty" alias:"connect_timeout_ms"` + + // Limits are the set of limits that are applied to the proxy for a specific upstream of a + // service instance. + Limits *UpstreamLimits `json:",omitempty"` + + // PassiveHealthCheck configuration determines how upstream proxy instances will + // be monitored for removal from the load balancing pool. + PassiveHealthCheck *PassiveHealthCheck `json:",omitempty" alias:"passive_health_check"` + + // MeshGatewayConfig controls how Mesh Gateways are configured and used + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway" ` +} + +type PassiveHealthCheck struct { + // Interval between health check analysis sweeps. Each sweep may remove + // hosts or return hosts to the pool. + Interval time.Duration `json:",omitempty"` + + // MaxFailures is the count of consecutive failures that results in a host + // being removed from the pool. + MaxFailures uint32 `alias:"max_failures"` +} + +// UpstreamLimits describes the limits that are associated with a specific +// upstream of a service instance. +type UpstreamLimits struct { + // MaxConnections is the maximum number of connections the local proxy can + // make to the upstream service. + MaxConnections *int `alias:"max_connections"` + + // MaxPendingRequests is the maximum number of requests that will be queued + // waiting for an available connection. This is mostly applicable to HTTP/1.1 + // clusters since all HTTP/2 requests are streamed over a single + // connection. + MaxPendingRequests *int `alias:"max_pending_requests"` + + // MaxConcurrentRequests is the maximum number of in-flight requests that will be allowed + // to the upstream cluster at a point in time. This is mostly applicable to HTTP/2 + // clusters since all HTTP/1.1 requests are limited by MaxConnections. 
+ MaxConcurrentRequests *int `alias:"max_concurrent_requests"` +} + type ServiceConfigEntry struct { - Kind string - Name string - Namespace string `json:",omitempty"` - Protocol string `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - ExternalSNI string `json:",omitempty" alias:"external_sni"` + Kind string + Name string + Namespace string `json:",omitempty"` + Protocol string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + ExternalSNI string `json:",omitempty" alias:"external_sni"` + UpstreamConfig *UpstreamConfiguration `json:",omitempty" alias:"upstream_config"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -129,15 +244,17 @@ func (s *ServiceConfigEntry) GetModifyIndex() uint64 { } type ProxyConfigEntry struct { - Kind string - Name string - Namespace string `json:",omitempty"` - Config map[string]interface{} `json:",omitempty"` - MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` - Expose ExposeConfig `json:",omitempty"` - Meta map[string]string `json:",omitempty"` - CreateIndex uint64 - ModifyIndex uint64 + Kind string + Name string + Namespace string `json:",omitempty"` + Mode ProxyMode `json:",omitempty"` + TransparentProxy *TransparentProxyConfig `json:",omitempty" alias:"transparent_proxy"` + Config map[string]interface{} `json:",omitempty"` + MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"` + Expose ExposeConfig `json:",omitempty"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 } func (p *ProxyConfigEntry) GetKind() string { @@ -182,6 +299,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil case ServiceIntentions: return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil + case MeshConfig: + return &MeshConfigEntry{}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } @@ -288,7 +407,7 @@ func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (Confi return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -313,7 +432,7 @@ func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *Q return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -351,7 +470,7 @@ func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *W if err != nil { return false, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var buf bytes.Buffer if _, err := io.Copy(&buf, resp.Body); err != nil { @@ -374,7 +493,7 @@ func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*W if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go b/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go new file mode 100644 index 0000000000..9ec18ea67e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_cluster.go @@ -0,0 +1,53 @@ +package api + +import "encoding/json" + +type 
MeshConfigEntry struct { + Namespace string `json:",omitempty"` + TransparentProxy TransparentProxyMeshConfig `alias:"transparent_proxy"` + Meta map[string]string `json:",omitempty"` + CreateIndex uint64 + ModifyIndex uint64 +} + +type TransparentProxyMeshConfig struct { + MeshDestinationsOnly bool `alias:"mesh_destinations_only"` +} + +func (e *MeshConfigEntry) GetKind() string { + return MeshConfig +} + +func (e *MeshConfigEntry) GetName() string { + return MeshConfigMesh +} + +func (e *MeshConfigEntry) GetNamespace() string { + return e.Namespace +} + +func (e *MeshConfigEntry) GetMeta() map[string]string { + return e.Meta +} + +func (e *MeshConfigEntry) GetCreateIndex() uint64 { + return e.CreateIndex +} + +func (e *MeshConfigEntry) GetModifyIndex() uint64 { + return e.ModifyIndex +} + +// MarshalJSON adds the Kind field so that the JSON can be decoded back into the +// correct type. +func (e *MeshConfigEntry) MarshalJSON() ([]byte, error) { + type Alias MeshConfigEntry + source := &struct { + Kind string + *Alias + }{ + Kind: MeshConfig, + Alias: (*Alias)(e), + } + return json.Marshal(source) +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go index 26a7bfb1df..f8a83dc15e 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_ca.go +++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go @@ -23,6 +23,14 @@ type CAConfig struct { // configuration is an error. State map[string]string + // ForceWithoutCrossSigning indicates that the CA reconfiguration should go + // ahead even if the current CA is unable to cross sign certificates. This + // risks temporary connection failures during the rollout as new leafs will be + // rejected by proxies that have not yet observed the new root cert but is the + // only option if a CA that doesn't support cross signing needs to be + // reconfigured or mirated away from. 
+ ForceWithoutCrossSigning bool + CreateIndex uint64 ModifyIndex uint64 } @@ -130,7 +138,7 @@ func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -151,7 +159,7 @@ func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -173,7 +181,7 @@ func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, erro if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go index 26fb6cc4b1..d1f0b6530c 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -170,7 +170,7 @@ func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -194,7 +194,7 @@ func (h *Connect) IntentionGetExact(source, destination string, q *QueryOptions) if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -226,7 +226,7 @@ func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -259,7 +259,7 @@ func (h *Connect) IntentionDeleteExact(source, destination string, q *WriteOptio if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &WriteMeta{} qm.RequestTime = rtt @@ -277,7 +277,7 @@ func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &WriteMeta{} qm.RequestTime = rtt @@ -303,7 +303,7 @@ func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[str if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -330,7 +330,7 @@ func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, * if err != nil { return false, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -355,7 +355,7 @@ func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -382,7 +382,7 @@ func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *Wri if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -406,7 +406,7 @@ func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go 
b/vendor/github.com/hashicorp/consul/api/coordinate.go index 776630f67d..32c7822c1b 100644 --- a/vendor/github.com/hashicorp/consul/api/coordinate.go +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -37,7 +37,7 @@ func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out []*CoordinateDatacenterMap if err := decodeBody(resp, &out); err != nil { @@ -54,7 +54,7 @@ func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, err if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -76,7 +76,7 @@ func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -92,7 +92,7 @@ func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *Qu if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go index 238046853a..56dcc9bcd2 100644 --- a/vendor/github.com/hashicorp/consul/api/debug.go +++ b/vendor/github.com/hashicorp/consul/api/debug.go @@ -27,7 +27,11 @@ func (d *Debug) Heap() ([]byte, error) { if err != nil { return nil, fmt.Errorf("error making request: %s", err) } - defer resp.Body.Close() + defer closeResponseBody(resp) + + if resp.StatusCode != 200 { + return nil, generateUnexpectedResponseCodeError(resp) + } // We return a raw response because we're just passing through a response // from the pprof handlers @@ -50,7 +54,11 @@ func (d *Debug) Profile(seconds int) ([]byte, error) { if err != nil { return nil, fmt.Errorf("error making request: %s", err) } - defer resp.Body.Close() + defer closeResponseBody(resp) + + if resp.StatusCode != 200 { + return nil, generateUnexpectedResponseCodeError(resp) + } // We return a raw response because we're just passing through a response // from the pprof handlers @@ -73,7 +81,11 @@ func (d *Debug) Trace(seconds int) ([]byte, error) { if err != nil { return nil, fmt.Errorf("error making request: %s", err) } - defer resp.Body.Close() + defer closeResponseBody(resp) + + if resp.StatusCode != 200 { + return nil, generateUnexpectedResponseCodeError(resp) + } // We return a raw response because we're just passing through a response // from the pprof handlers @@ -93,7 +105,11 @@ func (d *Debug) Goroutine() ([]byte, error) { if err != nil { return nil, fmt.Errorf("error making request: %s", err) } - defer resp.Body.Close() + defer closeResponseBody(resp) + + if resp.StatusCode != 200 { + return nil, generateUnexpectedResponseCodeError(resp) + } // We return a raw response because we're just passing through a response // from the pprof handlers diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go index f67f881c23..b78e6c3c42 100644 --- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go +++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go @@ -43,7 +43,7 @@ func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryO if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff 
--git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go index 85b5b069b0..1da41375cd 100644 --- a/vendor/github.com/hashicorp/consul/api/event.go +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -45,12 +45,13 @@ func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, er if params.Payload != nil { r.body = bytes.NewReader(params.Payload) } + r.header.Set("Content-Type", "application/octet-stream") rtt, resp, err := requireOK(e.c.doRequest(r)) if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out UserEvent @@ -74,7 +75,7 @@ func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, er if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod index 89e6e0c94b..348ad8a738 100644 --- a/vendor/github.com/hashicorp/consul/api/go.mod +++ b/vendor/github.com/hashicorp/consul/api/go.mod @@ -5,7 +5,7 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( - github.com/hashicorp/consul/sdk v0.7.0 + github.com/hashicorp/consul/sdk v0.8.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum index 57ef543992..b95bd47444 100644 --- a/vendor/github.com/hashicorp/consul/api/go.sum +++ b/vendor/github.com/hashicorp/consul/api/go.sum @@ -7,7 +7,6 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -23,7 +22,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxB github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -49,18 +47,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 
h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= @@ -83,11 +78,11 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= @@ -97,22 +92,18 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 
h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -121,11 +112,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 99b9ac2574..0a3fd8dd1e 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -58,6 +58,7 @@ type HealthCheckDefinition struct { Header map[string][]string Method string Body string + TLSServerName string TLSSkipVerify bool TCP string IntervalDuration time.Duration `json:"-"` @@ -233,7 +234,7 @@ func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, e if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -254,7 +255,7 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -331,7 +332,7 @@ func (h *Health) 
service(service string, tags []string, passingOnly bool, q *Que if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ -361,7 +362,7 @@ func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 351d287d66..1d5c11295e 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -69,7 +69,7 @@ func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { if resp == nil { return nil, qm, nil } - defer resp.Body.Close() + defer closeResponseBody(resp) var entries []*KVPair if err := decodeBody(resp, &entries); err != nil { @@ -90,7 +90,7 @@ func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { if resp == nil { return nil, qm, nil } - defer resp.Body.Close() + defer closeResponseBody(resp) var entries []*KVPair if err := decodeBody(resp, &entries); err != nil { @@ -113,7 +113,7 @@ func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMe if resp == nil { return nil, qm, nil } - defer resp.Body.Close() + defer closeResponseBody(resp) var entries []string if err := decodeBody(resp, &entries); err != nil { @@ -138,10 +138,10 @@ func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) qm.RequestTime = rtt if resp.StatusCode == 404 { - resp.Body.Close() + closeResponseBody(resp) return nil, qm, nil } else if resp.StatusCode != 200 { - resp.Body.Close() + closeResponseBody(resp) return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) } return resp, qm, nil @@ -205,11 +205,12 @@ func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOpti r.params.Set(param, val) } r.body = bytes.NewReader(body) + r.header.Set("Content-Type", "application/octet-stream") rtt, resp, err := requireOK(k.c.doRequest(r)) if err != nil { return false, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &WriteMeta{} qm.RequestTime = rtt @@ -253,7 +254,7 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption if err != nil { return false, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &WriteMeta{} qm.RequestTime = rtt diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go index 49782d2a8c..20f6c8d5cf 100644 --- a/vendor/github.com/hashicorp/consul/api/namespace.go +++ b/vendor/github.com/hashicorp/consul/api/namespace.go @@ -67,7 +67,7 @@ func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteM if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out Namespace @@ -90,7 +90,7 @@ func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteM if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} var out Namespace @@ -109,7 +109,7 @@ func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) @@ 
-132,7 +132,7 @@ func (n *Namespaces) Delete(name string, q *WriteOptions) (*WriteMeta, error) { if err != nil { return nil, err } - resp.Body.Close() + closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} return wm, nil @@ -146,7 +146,7 @@ func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) { if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go index 5cf7e49730..5476f5c5b4 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_area.go +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -93,7 +93,7 @@ func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -114,7 +114,7 @@ func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (stri if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -154,7 +154,7 @@ func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, erro if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -171,7 +171,7 @@ func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go index 57876ee9f6..8175f5133a 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -284,7 +284,7 @@ func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfig if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out AutopilotConfiguration if err := decodeBody(resp, &out); err != nil { @@ -303,7 +303,7 @@ func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *W if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -319,7 +319,7 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W if err != nil { return false, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var buf bytes.Buffer if _, err := io.Copy(&buf, resp.Body); err != nil { @@ -334,11 +334,24 @@ func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *W func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) { r := op.c.newRequest("GET", "/v1/operator/autopilot/health") r.setQueryOptions(q) - _, resp, err := requireOK(op.c.doRequest(r)) + + // we cannot just use requireOK because this endpoint might use a 429 status to indicate + // that unhealthiness + _, resp, err := op.c.doRequest(r) if err != nil { + if resp != nil { + closeResponseBody(resp) + } return nil, err } - defer resp.Body.Close() + + // these are the only 2 status codes that would indicate that we should + // expect the body to contain the right format. 
+ if resp.StatusCode != 200 && resp.StatusCode != 429 { + return nil, generateUnexpectedResponseCodeError(resp) + } + + defer closeResponseBody(resp) var out OperatorHealthReply if err := decodeBody(resp, &out); err != nil { @@ -354,7 +367,7 @@ func (op *Operator) AutopilotState(q *QueryOptions) (*AutopilotState, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out AutopilotState if err := decodeBody(resp, &out); err != nil { diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go index 5b80f9f914..baad70eedb 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go +++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go @@ -40,7 +40,7 @@ func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -52,7 +52,7 @@ func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out []*KeyringResponse if err := decodeBody(resp, &out); err != nil { @@ -72,7 +72,7 @@ func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -87,6 +87,6 @@ func (op *Operator) KeyringUse(key string, q *WriteOptions) error { if err != nil { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go index 51b64cef48..87904bd8cf 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_license.go +++ b/vendor/github.com/hashicorp/consul/api/operator_license.go @@ -1,8 +1,8 @@ package api import ( + "fmt" "io/ioutil" - "strings" "time" ) @@ -66,7 +66,7 @@ func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) { if err != nil { return "", err } - defer resp.Body.Close() + defer closeResponseBody(resp) data, err := ioutil.ReadAll(resp.Body) if err != nil { @@ -78,37 +78,17 @@ func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) { // LicenseReset will reset the license to the builtin one if it is still valid. // If the builtin license is invalid, the current license stays active. 
-func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) { - var reply LicenseReply - r := op.c.newRequest("DELETE", "/v1/operator/license") - r.setWriteOptions(opts) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := decodeBody(resp, &reply); err != nil { - return nil, err - } - - return &reply, nil +// +// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses +// are now set via agent configuration instead of through the API +func (*Operator) LicenseReset(_ *WriteOptions) (*LicenseReply, error) { + return nil, fmt.Errorf("Consul 1.10 no longer supports API driven license management.") } -func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) { - var reply LicenseReply - r := op.c.newRequest("PUT", "/v1/operator/license") - r.setWriteOptions(opts) - r.body = strings.NewReader(license) - _, resp, err := requireOK(op.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := decodeBody(resp, &reply); err != nil { - return nil, err - } - - return &reply, nil +// LicensePut will configure the Consul Enterprise license for the target datacenter +// +// DEPRECATED: Consul 1.10 removes the corresponding HTTP endpoint as licenses +// are now set via agent configuration instead of through the API +func (*Operator) LicensePut(_ string, _ *WriteOptions) (*LicenseReply, error) { + return nil, fmt.Errorf("Consul 1.10 no longer supports API driven license management.") } diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go index c6d7165d4d..0bfb85d000 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_raft.go +++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go @@ -44,7 +44,7 @@ func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, e if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var out RaftConfiguration if err := decodeBody(resp, &out); err != nil { @@ -67,7 +67,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err return err } - resp.Body.Close() + closeResponseBody(resp) return nil } @@ -84,6 +84,6 @@ func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error { return err } - resp.Body.Close() + closeResponseBody(resp) return nil } diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go index 5ac2535c71..5b2d5a5d18 100644 --- a/vendor/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -158,7 +158,7 @@ func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) if err != nil { return "", nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt @@ -204,7 +204,7 @@ func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, err if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{} wm.RequestTime = rtt diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go index 157ad53f55..3f61acfbb4 100644 --- a/vendor/github.com/hashicorp/consul/api/session.go +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -141,7 +141,7 @@ func (s *Session) Renew(id string, q *WriteOptions) 
(*SessionEntry, *WriteMeta, if err != nil { return nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) wm := &WriteMeta{RequestTime: rtt} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go index e902377dd5..0c8294f37c 100644 --- a/vendor/github.com/hashicorp/consul/api/snapshot.go +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -38,6 +38,7 @@ func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { r := s.c.newRequest("PUT", "/v1/snapshot") r.body = in + r.header.Set("Content-Type", "application/octet-stream") r.setWriteOptions(q) _, _, err := requireOK(s.c.doRequest(r)) if err != nil { diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go index 57f379c7b1..2a81b9b5fe 100644 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -22,7 +22,7 @@ func (s *Status) LeaderWithQueryOptions(q *QueryOptions) (string, error) { if err != nil { return "", err } - defer resp.Body.Close() + defer closeResponseBody(resp) var leader string if err := decodeBody(resp, &leader); err != nil { @@ -47,7 +47,7 @@ func (s *Status) PeersWithQueryOptions(q *QueryOptions) ([]string, error) { if err != nil { return nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) var peers []string if err := decodeBody(resp, &peers); err != nil { diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go index ef06bcbfef..55eb805f41 100644 --- a/vendor/github.com/hashicorp/consul/api/txn.go +++ b/vendor/github.com/hashicorp/consul/api/txn.go @@ -221,7 +221,7 @@ func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMet if err != nil { return false, nil, nil, err } - defer resp.Body.Close() + defer closeResponseBody(resp) qm := &QueryMeta{} parseQueryMeta(resp, qm) diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index b9e936344c..41215d7fc4 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -10,6 +10,7 @@ import ( "os" "strconv" "strings" + "sync" "syscall" "unsafe" @@ -27,6 +28,7 @@ const ( backgroundRed = 0x40 backgroundIntensity = 0x80 backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 ) @@ -93,6 +95,7 @@ type Writer struct { oldattr word oldpos coord rest bytes.Buffer + mutex sync.Mutex } // NewColorable returns new instance of Writer which handles escape sequence from File. 
@@ -432,6 +435,8 @@ func atoiWithDefault(s string, def int) (int, error) { // Write writes data on console func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) @@ -683,14 +688,19 @@ loop: switch { case n == 0 || n == 100: attr = w.oldattr - case 1 <= n && n <= 5: + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case n == 22 || n == 25: - attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore case 30 <= n && n <= 37: attr &= backgroundMask if (n-30)&1 != 0 { @@ -709,7 +719,7 @@ loop: n256setup() } attr &= backgroundMask - attr |= n256foreAttr[n256] + attr |= n256foreAttr[n256%len(n256foreAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { @@ -751,7 +761,7 @@ loop: n256setup() } attr &= foregroundMask - attr |= n256backAttr[n256] + attr |= n256backAttr[n256%len(n256backAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go index 825617fe21..3f29a48c48 100644 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -25,6 +25,7 @@ var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc // MsgAcceptAction represents the action to be taken. type MsgAcceptAction int +// Allowed returned values from a MsgAcceptFunc. const ( MsgAccept MsgAcceptAction = iota // Accept the message MsgReject // Reject the message with a RcodeFormatError diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index 7880d7a826..8539aae6c7 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -8,9 +8,9 @@ import ( "crypto/elliptic" "crypto/rand" "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" + _ "crypto/sha1" // need its init function + _ "crypto/sha256" // need its init function + _ "crypto/sha512" // need its init function "encoding/asn1" "encoding/binary" "encoding/hex" diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 844cf92b8c..c9181783de 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -28,6 +28,41 @@ const ( _DO = 1 << 15 // DNSSEC OK ) +// makeDataOpt is used to unpack the EDNS0 option(s) from a message. +func makeDataOpt(code uint16) EDNS0 { + // All the EDNS0.* constants above need to be in this switch. 
+ switch code { + case EDNS0LLQ: + return new(EDNS0_LLQ) + case EDNS0UL: + return new(EDNS0_UL) + case EDNS0NSID: + return new(EDNS0_NSID) + case EDNS0DAU: + return new(EDNS0_DAU) + case EDNS0DHU: + return new(EDNS0_DHU) + case EDNS0N3U: + return new(EDNS0_N3U) + case EDNS0SUBNET: + return new(EDNS0_SUBNET) + case EDNS0EXPIRE: + return new(EDNS0_EXPIRE) + case EDNS0COOKIE: + return new(EDNS0_COOKIE) + case EDNS0TCPKEEPALIVE: + return new(EDNS0_TCP_KEEPALIVE) + case EDNS0PADDING: + return new(EDNS0_PADDING) + case EDNS0EDE: + return new(EDNS0_EDE) + default: + e := new(EDNS0_LOCAL) + e.Code = code + return e + } +} + // OPT is the EDNS0 RR appended to messages to convey extra (meta) information. // See RFC 6891. type OPT struct { @@ -95,7 +130,7 @@ func (*OPT) parse(c *zlexer, origin string) *ParseError { return &ParseError{err: "OPT records do not have a presentation format"} } -func (r1 *OPT) isDuplicate(r2 RR) bool { return false } +func (rr *OPT) isDuplicate(r2 RR) bool { return false } // return the old value -> delete SetVersion? @@ -465,7 +500,7 @@ func (e *EDNS0_LLQ) copy() EDNS0 { return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} } -// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. +// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. type EDNS0_DAU struct { Code uint16 // Always EDNS0DAU AlgCode []uint8 diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index 472ada5d19..5904927ca8 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -438,37 +438,6 @@ Option: return edns, off, nil } -func makeDataOpt(code uint16) EDNS0 { - switch code { - case EDNS0NSID: - return new(EDNS0_NSID) - case EDNS0SUBNET: - return new(EDNS0_SUBNET) - case EDNS0COOKIE: - return new(EDNS0_COOKIE) - case EDNS0EXPIRE: - return new(EDNS0_EXPIRE) - case EDNS0UL: - return new(EDNS0_UL) - case EDNS0LLQ: - return new(EDNS0_LLQ) - case EDNS0DAU: - return new(EDNS0_DAU) - case EDNS0DHU: - return new(EDNS0_DHU) - case EDNS0N3U: - return new(EDNS0_N3U) - case EDNS0PADDING: - return new(EDNS0_PADDING) - case EDNS0EDE: - return new(EDNS0_EDE) - default: - e := new(EDNS0_LOCAL) - e.Code = code - return e - } -} - func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { for _, el := range options { b, err := el.pack() diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index 45c7f26d85..d256b652ea 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -90,7 +90,7 @@ Fetch: return nil } -func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false } +func (r *PrivateRR) isDuplicate(r2 RR) bool { return false } // PrivateHandle registers a private resource record type. It requires // string and numeric representation of private RR type and generator function as argument. 
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 05765aed87..e398484da9 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -734,7 +734,11 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { return &ParseError{"", "bad HIP PublicKey", l} } rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey) + if decodedPKerr != nil { + return &ParseError{"", "bad HIP PublicKey", l} + } + rr.PublicKeyLength = uint16(len(decodedPK)) // RendezvousServers (if any) l, _ = c.Next() diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index 64800daa6d..3344253c2b 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -10,6 +10,7 @@ import ( "strings" ) +// SVCBKey is the type of the keys used in the SVCB RR. type SVCBKey uint16 // Keys defined in draft-ietf-dnsop-svcb-https-01 Section 12.3.2. diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 99dd315bf1..d9becb67cd 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -152,12 +152,9 @@ const ( ) // Used in ZONEMD https://tools.ietf.org/html/rfc8976 - const ( - // ZoneMD Accepted Schemes ZoneMDSchemeSimple = 1 - // ZoneMD Hash Algorithms ZoneMDHashAlgSHA384 = 1 ZoneMDHashAlgSHA512 = 2 ) @@ -1416,13 +1413,13 @@ func (rr *APL) String() string { } // str returns presentation form of the APL prefix. -func (p *APLPrefix) str() string { +func (a *APLPrefix) str() string { var sb strings.Builder - if p.Negation { + if a.Negation { sb.WriteByte('!') } - switch len(p.Network.IP) { + switch len(a.Network.IP) { case net.IPv4len: sb.WriteByte('1') case net.IPv6len: @@ -1431,20 +1428,20 @@ func (p *APLPrefix) str() string { sb.WriteByte(':') - switch len(p.Network.IP) { + switch len(a.Network.IP) { case net.IPv4len: - sb.WriteString(p.Network.IP.String()) + sb.WriteString(a.Network.IP.String()) case net.IPv6len: // add prefix for IPv4-mapped IPv6 - if v4 := p.Network.IP.To4(); v4 != nil { + if v4 := a.Network.IP.To4(); v4 != nil { sb.WriteString("::ffff:") } - sb.WriteString(p.Network.IP.String()) + sb.WriteString(a.Network.IP.String()) } sb.WriteByte('/') - prefix, _ := p.Network.Mask.Size() + prefix, _ := a.Network.Mask.Size() sb.WriteString(strconv.Itoa(prefix)) return sb.String() @@ -1458,17 +1455,17 @@ func (a *APLPrefix) equals(b *APLPrefix) bool { } // copy returns a copy of the APL prefix. -func (p *APLPrefix) copy() APLPrefix { +func (a *APLPrefix) copy() APLPrefix { return APLPrefix{ - Negation: p.Negation, - Network: copyNet(p.Network), + Negation: a.Negation, + Network: copyNet(a.Network), } } // len returns size of the prefix in wire format. -func (p *APLPrefix) len() int { +func (a *APLPrefix) len() int { // 4-byte header and the network address prefix (see Section 4 of RFC 3123) - prefix, _ := p.Network.Mask.Size() + prefix, _ := a.Network.Mask.Size() return 4 + (prefix+7)/8 } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 695d1e8b48..622c69a1b8 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. 
-var Version = v{1, 1, 42} +var Version = v{1, 1, 43} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 80502585f3..6c0c033d48 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -27,6 +27,7 @@ import ( "net" "net/http" "net/url" + "os" "strings" "sync" "time" @@ -379,18 +380,23 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT ExpectContinueTimeout: 1 * time.Second, DialContext: dialContext, } - if opts.http2Enabled { + if opts.http2Enabled || os.Getenv("PROMETHEUS_COMMON_ENABLE_HTTP2") != "" { // HTTP/2 support is golang has many problematic cornercases where // dead connections would be kept and used in connection pools. // https://github.com/golang/go/issues/32388 // https://github.com/golang/go/issues/39337 // https://github.com/golang/go/issues/39750 - // TODO: Re-Enable HTTP/2 once upstream issue is fixed. - // TODO: use ForceAttemptHTTP2 when we move to Go 1.13+. - err := http2.ConfigureTransport(rt.(*http.Transport)) + + // Enable HTTP2 if the environment variable + // PROMETHEUS_COMMON_ENABLE_HTTP2 is set. + // This is a temporary workaround so that users can safely test this + // and validate that HTTP2 can be enabled Prometheus-Wide again. + + http2t, err := http2.ConfigureTransports(rt.(*http.Transport)) if err != nil { return nil, err } + http2t.ReadIdleTimeout = time.Minute } // If a authorization_credentials is provided, create a round tripper that will set the diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index bd4e347454..64dc0eb40c 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,7 +18,7 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index b6079b31ee..84be0643ec 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -24,7 +24,7 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" ) diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index e438fe87fe..44290f1638 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -530,8 +530,6 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws storag // Cancel when execution is done or an error was raised. 
defer q.cancel() - const env = "query execution" - evalSpanTimer, ctx := q.stats.GetSpanTimer(ctx, stats.EvalTotalTime) defer evalSpanTimer.Finish() diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index e497be364b..d72b4caf65 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -513,6 +513,13 @@ func funcAbsentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN }) } +// === present_over_time(Vector parser.ValueTypeMatrix) Vector === +func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + return 1 + }) +} + func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { enh.Out = append(enh.Out, Sample{ @@ -959,6 +966,7 @@ var FunctionCalls = map[string]FunctionCall{ "minute": funcMinute, "month": funcMonth, "predict_linear": funcPredictLinear, + "present_over_time": funcPresentOverTime, "quantile_over_time": funcQuantileOverTime, "rate": funcRate, "resets": funcResets, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index a127cd28a4..da5d279f31 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -39,6 +39,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeMatrix}, ReturnType: ValueTypeVector, }, + "present_over_time": { + Name: "present_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, "avg_over_time": { Name: "avg_over_time", ArgTypes: []ValueType{ValueTypeMatrix}, diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 60a45d0ba1..dc67b08bf9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -583,7 +583,7 @@ func (t *Test) exec(tc testCommand) error { err = cmd.compareResult(vec) } if err != nil { - return errors.Wrapf(err, "error in %s %s (line %d) rande mode", cmd, iq.expr, cmd.line) + return errors.Wrapf(err, "error in %s %s (line %d) range mode", cmd, iq.expr, cmd.line) } } diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index fa3a71d352..ddcc8a1a40 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -170,8 +170,8 @@ func (m Matrix) Len() int { return len(m) } func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 } func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -// ContainsSameLabelset checks if a matrix has samples with the same labelset -// Such a behavior is semantically undefined +// ContainsSameLabelset checks if a matrix has samples with the same labelset. +// Such a behavior is semantically undefined. 
// https://github.com/prometheus/prometheus/issues/4562 func (m Matrix) ContainsSameLabelset() bool { l := make(map[uint64]struct{}, len(m)) diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 20932cd6dd..e86221c663 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -14,11 +14,8 @@ package scrape import ( - "encoding" "fmt" "hash/fnv" - "net" - "os" "reflect" "sync" "time" @@ -32,6 +29,7 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/osutil" ) var targetMetadataCache = newMetadataMetricsCollector() @@ -206,7 +204,7 @@ func (m *Manager) reload() { // setJitterSeed calculates a global jitterSeed per server relying on extra label set. func (m *Manager) setJitterSeed(labels labels.Labels) error { h := fnv.New64a() - hostname, err := getFqdn() + hostname, err := osutil.GetFQDN() if err != nil { return err } @@ -319,46 +317,3 @@ func (m *Manager) TargetsDropped() map[string][]*Target { } return targets } - -// getFqdn returns a FQDN if it's possible, otherwise falls back to hostname. -func getFqdn() (string, error) { - hostname, err := os.Hostname() - if err != nil { - return "", err - } - - ips, err := net.LookupIP(hostname) - if err != nil { - // Return the system hostname if we can't look up the IP address. - return hostname, nil - } - - lookup := func(ipStr encoding.TextMarshaler) (string, error) { - ip, err := ipStr.MarshalText() - if err != nil { - return "", err - } - hosts, err := net.LookupAddr(string(ip)) - if err != nil || len(hosts) == 0 { - return "", err - } - return hosts[0], nil - } - - for _, addr := range ips { - if ip := addr.To4(); ip != nil { - if fqdn, err := lookup(ip); err == nil { - return fqdn, nil - } - - } - - if ip := addr.To16(); ip != nil { - if fqdn, err := lookup(ip); err == nil { - return fqdn, nil - } - - } - } - return hostname, nil -} diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index d9ed8a02b7..6835a23b2c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -225,11 +225,10 @@ type scrapePool struct { cancel context.CancelFunc // mtx must not be taken after targetMtx. - mtx sync.Mutex - config *config.ScrapeConfig - client *http.Client - loops map[uint64]loop - targetLimitHit bool // Internal state to speed up the target_limit checks. + mtx sync.Mutex + config *config.ScrapeConfig + client *http.Client + loops map[uint64]loop targetMtx sync.Mutex // activeTargets and loops must always be synchronized to have the same @@ -574,20 +573,14 @@ func (sp *scrapePool) sync(targets []*Target) { // refreshTargetLimitErr returns an error that can be passed to the scrape loops // if the number of targets exceeds the configured limit. 
func (sp *scrapePool) refreshTargetLimitErr() error { - if sp.config == nil || sp.config.TargetLimit == 0 && !sp.targetLimitHit { + if sp.config == nil || sp.config.TargetLimit == 0 { return nil } - l := len(sp.activeTargets) - if l <= int(sp.config.TargetLimit) && !sp.targetLimitHit { - return nil - } - var err error - sp.targetLimitHit = l > int(sp.config.TargetLimit) - if sp.targetLimitHit { + if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) { targetScrapePoolExceededTargetLimit.Inc() - err = fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit) + return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit) } - return err + return nil } func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index d3dab0e21e..715a2a94f1 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -145,6 +145,9 @@ type SelectHints struct { Grouping []string // List of label names used in aggregation. By bool // Indicate whether it is without or by. Range int64 // Range vector selector range in milliseconds. + + ShardIndex uint64 // Current shard index (starts from 0 and up to ShardCount-1). + ShardCount uint64 // Total number of shards (0 means sharding is disabled). } // TODO(bwplotka): Move to promql/engine_test.go? diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 655886b469..9631de1c9c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -352,10 +352,12 @@ type QueueManager struct { clientMtx sync.RWMutex storeClient WriteClient - seriesMtx sync.Mutex - seriesLabels map[uint64]labels.Labels + seriesMtx sync.Mutex // Covers seriesLabels and droppedSeries. + seriesLabels map[uint64]labels.Labels + droppedSeries map[uint64]struct{} + + seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first. seriesSegmentIndexes map[uint64]int - droppedSeries map[uint64]struct{} shards *shards numShards int @@ -642,6 +644,8 @@ func (t *QueueManager) Stop() { func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) { t.seriesMtx.Lock() defer t.seriesMtx.Unlock() + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() for _, s := range series { // Just make sure all the Refs of Series will insert into seriesSegmentIndexes map for tracking. t.seriesSegmentIndexes[s.Ref] = index @@ -664,12 +668,23 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) { } } +// Update the segment number held against the series, so we can trim older ones in SeriesReset. +func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) { + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() + for _, s := range series { + t.seriesSegmentIndexes[s.Ref] = index + } +} + // SeriesReset is used when reading a checkpoint. WAL Watcher should have // stored series records with the checkpoints index number, so we can now // delete any ref ID's lower than that # from the two maps. 
func (t *QueueManager) SeriesReset(index int) { t.seriesMtx.Lock() defer t.seriesMtx.Unlock() + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() // Check for series that are in segments older than the checkpoint // that were not also present in the checkpoint. for k, v := range t.seriesSegmentIndexes { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 42a91ff593..05fc7cbf40 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -79,8 +79,13 @@ type IndexReader interface { // by the label set of the underlying series. SortedPostings(index.Postings) index.Postings + // ShardedPostings returns a postings list filtered by the provided shardIndex + // out of shardCount. For a given posting, its shard MUST be computed hashing + // the series labels mod shardCount (eg. `labels.Hash() % shardCount == shardIndex`). + ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings + // Series populates the given labels and chunk metas for the series identified - // by the reference. + // by the reference. Chunks are skipped if chks is nil. // Returns storage.ErrNotFound if the ref does not resolve to a known series. Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error @@ -470,6 +475,10 @@ func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings { return r.ir.SortedPostings(p) } +func (r blockIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { + return r.ir.ShardedPostings(p, shardIndex, shardCount) +} + func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { if err := r.ir.Series(ref, lset, chks); err != nil { return errors.Wrapf(err, "block: %s", r.b.Meta().ULID) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go index ad8077c27e..c8efeab1d7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go @@ -95,7 +95,7 @@ func (b *bstream) writeByte(byt byte) { } func (b *bstream) writeBits(u uint64, nbits int) { - u <<= (64 - uint(nbits)) + u <<= 64 - uint(nbits) for nbits >= 8 { byt := byte(u >> 56) b.writeByte(byt) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index bc6915c805..1443e0bd49 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -386,7 +386,7 @@ func (cdm *ChunkDiskMapper) cut() (returnErr error) { cdm.readPathMtx.Unlock() } - mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), int(MaxHeadChunkFileSize)) + mmapFile, err := fileutil.OpenMmapFileWithSize(newFile.Name(), MaxHeadChunkFileSize) if err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index 82270ce863..54ab8ff361 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -19,6 +19,7 @@ import ( "hash/crc32" "unsafe" + "github.com/dennwc/varint" "github.com/pkg/errors" ) @@ -139,7 +140,7 @@ func NewDecbufUvarintAt(bs ByteSlice, off 
int, castagnoliTable *crc32.Table) Dec } b := bs.Range(off, off+binary.MaxVarintLen32) - l, n := binary.Uvarint(b) + l, n := varint.Uvarint(b) if n <= 0 || n > binary.MaxVarintLen32 { return Decbuf{E: errors.Errorf("invalid uvarint %d", n)} } @@ -207,11 +208,17 @@ func (d *Decbuf) Varint64() int64 { if d.E != nil { return 0 } - x, n := binary.Varint(d.B) + // Decode as unsigned first, since that's what the varint library implements. + ux, n := varint.Uvarint(d.B) if n < 1 { d.E = ErrInvalidSize return 0 } + // Now decode "ZigZag encoding" https://developers.google.com/protocol-buffers/docs/encoding#signed_integers. + x := int64(ux >> 1) + if ux&1 != 0 { + x = ^x + } d.B = d.B[n:] return x } @@ -220,7 +227,7 @@ func (d *Decbuf) Uvarint64() uint64 { if d.E != nil { return 0 } - x, n := binary.Uvarint(d.B) + x, n := varint.Uvarint(d.B) if n < 1 { d.E = ErrInvalidSize return 0 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 92ad7b53cc..a35e7f2560 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -14,12 +14,9 @@ package tsdb import ( - "context" "fmt" "math" "path/filepath" - "runtime" - "sort" "sync" "time" @@ -56,12 +53,6 @@ var ( ErrAppenderClosed = errors.New("appender closed") ) -type ExemplarStorage interface { - storage.ExemplarQueryable - AddExemplar(labels.Labels, exemplar.Exemplar) error - ValidateExemplar(labels.Labels, exemplar.Exemplar) error -} - // Head handles reads and writes of time series data within a time window. type Head struct { chunkRange atomic.Int64 @@ -115,6 +106,12 @@ type Head struct { memTruncationInProcess atomic.Bool } +type ExemplarStorage interface { + storage.ExemplarQueryable + AddExemplar(labels.Labels, exemplar.Exemplar) error + ValidateExemplar(labels.Labels, exemplar.Exemplar) error +} + // HeadOptions are parameters for the Head block. type HeadOptions struct { ChunkRange int64 @@ -144,6 +141,87 @@ func DefaultHeadOptions() *HeadOptions { } } +// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. +// It is always a no-op in Prometheus and mainly meant for external users who import TSDB. +// All the callbacks should be safe to be called concurrently. +// It is up to the user to implement soft or hard consistency by making the callbacks +// atomic or non-atomic. Atomic callbacks can cause degradation performance. +type SeriesLifecycleCallback interface { + // PreCreation is called before creating a series to indicate if the series can be created. + // A non nil error means the series should not be created. + PreCreation(labels.Labels) error + // PostCreation is called after creating a series to indicate a creation of series. + PostCreation(labels.Labels) + // PostDeletion is called after deletion of series. + PostDeletion(...labels.Labels) +} + +// NewHead opens the head block in dir. 
+func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { + var err error + if l == nil { + l = log.NewNopLogger() + } + if opts.ChunkRange < 1 { + return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) + } + if opts.SeriesCallback == nil { + opts.SeriesCallback = &noopSeriesLifecycleCallback{} + } + + em := NewExemplarMetrics(r) + es, err := NewCircularExemplarStorage(opts.MaxExemplars.Load(), em) + if err != nil { + return nil, err + } + + if stats == nil { + stats = NewHeadStats() + } + + h := &Head{ + wal: wal, + logger: l, + opts: opts, + exemplarMetrics: em, + exemplars: es, + series: newStripeSeries(opts.StripeSize, opts.SeriesCallback), + symbols: map[string]struct{}{}, + postings: index.NewUnorderedMemPostings(), + tombstones: tombstones.NewMemTombstones(), + iso: newIsolation(), + deleted: map[uint64]int{}, + memChunkPool: sync.Pool{ + New: func() interface{} { + return &memChunk{} + }, + }, + stats: stats, + reg: r, + } + h.chunkRange.Store(opts.ChunkRange) + h.minTime.Store(math.MaxInt64) + h.maxTime.Store(math.MinInt64) + h.lastWALTruncationTime.Store(math.MinInt64) + h.lastMemoryTruncationTime.Store(math.MinInt64) + h.metrics = newHeadMetrics(h, r) + + if opts.ChunkPool == nil { + opts.ChunkPool = chunkenc.NewPool() + } + + h.chunkDiskMapper, err = chunks.NewChunkDiskMapper( + mmappedChunksDir(opts.ChunkDirRoot), + opts.ChunkPool, + opts.ChunkWriteBufferSize, + ) + if err != nil { + return nil, err + } + + return h, nil +} + type headMetrics struct { activeAppenders prometheus.Gauge series prometheus.GaugeFunc @@ -318,6 +396,8 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { return m } +func mmappedChunksDir(dir string) string { return filepath.Join(dir, "chunks_head") } + // HeadStats are the statistics for the head component of the DB. type HeadStats struct { WALReplayStatus *WALReplayStatus @@ -325,484 +405,33 @@ type HeadStats struct { // NewHeadStats returns a new HeadStats object. func NewHeadStats() *HeadStats { - return &HeadStats{ - WALReplayStatus: &WALReplayStatus{}, - } -} - -// WALReplayStatus contains status information about the WAL replay. -type WALReplayStatus struct { - sync.RWMutex - Min int - Max int - Current int -} - -// GetWALReplayStatus returns the WAL replay status information. -func (s *WALReplayStatus) GetWALReplayStatus() WALReplayStatus { - s.RLock() - defer s.RUnlock() - - return WALReplayStatus{ - Min: s.Min, - Max: s.Max, - Current: s.Current, - } -} - -const cardinalityCacheExpirationTime = time.Duration(30) * time.Second - -// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. -func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats { - h.cardinalityMutex.Lock() - defer h.cardinalityMutex.Unlock() - currentTime := time.Duration(time.Now().Unix()) * time.Second - seconds := currentTime - h.lastPostingsStatsCall - if seconds > cardinalityCacheExpirationTime { - h.cardinalityCache = nil - } - if h.cardinalityCache != nil { - return h.cardinalityCache - } - h.cardinalityCache = h.postings.Stats(statsByLabelName) - h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second - - return h.cardinalityCache -} - -// NewHead opens the head block in dir. 
-func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { - var err error - if l == nil { - l = log.NewNopLogger() - } - if opts.ChunkRange < 1 { - return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) - } - if opts.SeriesCallback == nil { - opts.SeriesCallback = &noopSeriesLifecycleCallback{} - } - - em := NewExemplarMetrics(r) - es, err := NewCircularExemplarStorage(opts.MaxExemplars.Load(), em) - if err != nil { - return nil, err - } - - if stats == nil { - stats = NewHeadStats() - } - - h := &Head{ - wal: wal, - logger: l, - opts: opts, - exemplarMetrics: em, - exemplars: es, - series: newStripeSeries(opts.StripeSize, opts.SeriesCallback), - symbols: map[string]struct{}{}, - postings: index.NewUnorderedMemPostings(), - tombstones: tombstones.NewMemTombstones(), - iso: newIsolation(), - deleted: map[uint64]int{}, - memChunkPool: sync.Pool{ - New: func() interface{} { - return &memChunk{} - }, - }, - stats: stats, - reg: r, - } - h.chunkRange.Store(opts.ChunkRange) - h.minTime.Store(math.MaxInt64) - h.maxTime.Store(math.MinInt64) - h.lastWALTruncationTime.Store(math.MinInt64) - h.lastMemoryTruncationTime.Store(math.MinInt64) - h.metrics = newHeadMetrics(h, r) - - if opts.ChunkPool == nil { - opts.ChunkPool = chunkenc.NewPool() - } - - h.chunkDiskMapper, err = chunks.NewChunkDiskMapper( - mmappedChunksDir(opts.ChunkDirRoot), - opts.ChunkPool, - opts.ChunkWriteBufferSize, - ) - if err != nil { - return nil, err - } - - return h, nil -} - -func mmappedChunksDir(dir string) string { return filepath.Join(dir, "chunks_head") } - -func (h *Head) ApplyConfig(cfg *config.Config) error { - if !h.opts.EnableExemplarStorage { - return nil - } - - // Head uses opts.MaxExemplars in combination with opts.EnableExemplarStorage - // to decide if it should pass exemplars along to it's exemplar storage, so we - // need to update opts.MaxExemplars here. - prevSize := h.opts.MaxExemplars.Load() - h.opts.MaxExemplars.Store(cfg.StorageConfig.ExemplarsConfig.MaxExemplars) - - if prevSize == h.opts.MaxExemplars.Load() { - return nil - } - - migrated := h.exemplars.(*CircularExemplarStorage).Resize(h.opts.MaxExemplars.Load()) - level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", h.opts.MaxExemplars, "migrated", migrated) - return nil -} - -func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { - return h.exemplars.ExemplarQuerier(ctx) -} - -// processWALSamples adds a partition of samples it receives to the head and passes -// them on to other workers. -// Samples before the mint timestamp are discarded. -func (h *Head) processWALSamples( - minValidTime int64, - input <-chan []record.RefSample, output chan<- []record.RefSample, -) (unknownRefs uint64) { - defer close(output) - - // Mitigate lock contention in getByID. 
- refSeries := map[uint64]*memSeries{} - - mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) - - for samples := range input { - for _, s := range samples { - if s.T < minValidTime { - continue - } - ms := refSeries[s.Ref] - if ms == nil { - ms = h.series.getByID(s.Ref) - if ms == nil { - unknownRefs++ - continue - } - refSeries[s.Ref] = ms - } - if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper); chunkCreated { - h.metrics.chunksCreated.Inc() - h.metrics.chunks.Inc() - } - if s.T > maxt { - maxt = s.T - } - if s.T < mint { - mint = s.T - } - } - output <- samples - } - h.updateMinMaxTime(mint, maxt) - - return unknownRefs -} - -func (h *Head) updateMinMaxTime(mint, maxt int64) { - for { - lt := h.MinTime() - if mint >= lt { - break - } - if h.minTime.CAS(lt, mint) { - break - } - } - for { - ht := h.MaxTime() - if maxt <= ht { - break - } - if h.maxTime.CAS(ht, maxt) { - break - } - } -} - -func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks map[uint64][]*mmappedChunk) (err error) { - // Track number of samples that referenced a series we don't know about - // for error reporting. - var unknownRefs atomic.Uint64 - var unknownExemplarRefs atomic.Uint64 - - // Start workers that each process samples for a partition of the series ID space. - // They are connected through a ring of channels which ensures that all sample batches - // read from the WAL are processed in order. - var ( - wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - inputs = make([]chan []record.RefSample, n) - outputs = make([]chan []record.RefSample, n) - exemplarsInput chan record.RefExemplar - - dec record.Decoder - shards = make([][]record.RefSample, n) - - decoded = make(chan interface{}, 10) - decodeErr, seriesCreationErr error - seriesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSeries{} - }, - } - samplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSample{} - }, - } - tstonesPool = sync.Pool{ - New: func() interface{} { - return []tombstones.Stone{} - }, - } - exemplarsPool = sync.Pool{ - New: func() interface{} { - return []record.RefExemplar{} - }, - } - ) - - defer func() { - // For CorruptionErr ensure to terminate all workers before exiting. - _, ok := err.(*wal.CorruptionErr) - if ok || seriesCreationErr != nil { - for i := 0; i < n; i++ { - close(inputs[i]) - for range outputs[i] { - } - } - close(exemplarsInput) - wg.Wait() - } - }() - - wg.Add(n) - for i := 0; i < n; i++ { - outputs[i] = make(chan []record.RefSample, 300) - inputs[i] = make(chan []record.RefSample, 300) - - go func(input <-chan []record.RefSample, output chan<- []record.RefSample) { - unknown := h.processWALSamples(h.minValidTime.Load(), input, output) - unknownRefs.Add(unknown) - wg.Done() - }(inputs[i], outputs[i]) - } - - wg.Add(1) - exemplarsInput = make(chan record.RefExemplar, 300) - go func(input <-chan record.RefExemplar) { - defer wg.Done() - for e := range input { - ms := h.series.getByID(e.Ref) - if ms == nil { - unknownExemplarRefs.Inc() - continue - } - - if e.T < h.minValidTime.Load() { - continue - } - // At the moment the only possible error here is out of order exemplars, which we shouldn't see when - // replaying the WAL, so lets just log the error if it's not that type. 
- err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) - if err != nil && err == storage.ErrOutOfOrderExemplar { - level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) - } - } - }(exemplarsInput) - - go func() { - defer close(decoded) - for r.Next() { - rec := r.Record() - switch dec.Type(rec) { - case record.Series: - series := seriesPool.Get().([]record.RefSeries)[:0] - series, err = dec.Series(rec, series) - if err != nil { - decodeErr = &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- series - case record.Samples: - samples := samplesPool.Get().([]record.RefSample)[:0] - samples, err = dec.Samples(rec, samples) - if err != nil { - decodeErr = &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- samples - case record.Tombstones: - tstones := tstonesPool.Get().([]tombstones.Stone)[:0] - tstones, err = dec.Tombstones(rec, tstones) - if err != nil { - decodeErr = &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode tombstones"), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- tstones - case record.Exemplars: - exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0] - exemplars, err = dec.Exemplars(rec, exemplars) - if err != nil { - decodeErr = &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode exemplars"), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- exemplars - default: - // Noop. - } - } - }() - -Outer: - for d := range decoded { - switch v := d.(type) { - case []record.RefSeries: - for _, s := range v { - series, created, err := h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels) - if err != nil { - seriesCreationErr = err - break Outer - } - - if created { - // If this series gets a duplicate record, we don't restore its mmapped chunks, - // and instead restore everything from WAL records. - series.mmappedChunks = mmappedChunks[series.ref] - - h.metrics.chunks.Add(float64(len(series.mmappedChunks))) - h.metrics.chunksCreated.Add(float64(len(series.mmappedChunks))) - - if len(series.mmappedChunks) > 0 { - h.updateMinMaxTime(series.minTime(), series.maxTime()) - } - } else { - // TODO(codesome) Discard old samples and mmapped chunks and use mmap chunks for the new series ID. - - // There's already a different ref for this series. - multiRef[s.Ref] = series.ref - } - - if h.lastSeriesID.Load() < s.Ref { - h.lastSeriesID.Store(s.Ref) - } - } - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. - seriesPool.Put(v) - case []record.RefSample: - samples := v - // We split up the samples into chunks of 5000 samples or less. - // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise - // cause thousands of very large in flight buffers occupying large amounts - // of unused memory. - for len(samples) > 0 { - m := 5000 - if len(samples) < m { - m = len(samples) - } - for i := 0; i < n; i++ { - var buf []record.RefSample - select { - case buf = <-outputs[i]: - default: - } - shards[i] = buf[:0] - } - for _, sam := range samples[:m] { - if r, ok := multiRef[sam.Ref]; ok { - sam.Ref = r - } - mod := sam.Ref % uint64(n) - shards[mod] = append(shards[mod], sam) - } - for i := 0; i < n; i++ { - inputs[i] <- shards[i] - } - samples = samples[m:] - } - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. 
- samplesPool.Put(v) - case []tombstones.Stone: - for _, s := range v { - for _, itv := range s.Intervals { - if itv.Maxt < h.minValidTime.Load() { - continue - } - if m := h.series.getByID(s.Ref); m == nil { - unknownRefs.Inc() - continue - } - h.tombstones.AddInterval(s.Ref, itv) - } - } - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. - tstonesPool.Put(v) - case []record.RefExemplar: - for _, e := range v { - exemplarsInput <- e - } - //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. - exemplarsPool.Put(v) - default: - panic(fmt.Errorf("unexpected decoded type: %T", d)) - } - } - - if decodeErr != nil { - return decodeErr - } - if seriesCreationErr != nil { - // Drain the channel to unblock the goroutine. - for range decoded { - } - return seriesCreationErr + return &HeadStats{ + WALReplayStatus: &WALReplayStatus{}, } +} - // Signal termination to each worker and wait for it to close its output channel. - for i := 0; i < n; i++ { - close(inputs[i]) - for range outputs[i] { - } - } - close(exemplarsInput) - wg.Wait() +// WALReplayStatus contains status information about the WAL replay. +type WALReplayStatus struct { + sync.RWMutex + Min int + Max int + Current int +} - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") - } +// GetWALReplayStatus returns the WAL replay status information. +func (s *WALReplayStatus) GetWALReplayStatus() WALReplayStatus { + s.RLock() + defer s.RUnlock() - if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load()) + return WALReplayStatus{ + Min: s.Min, + Max: s.Max, + Current: s.Current, } - return nil } +const cardinalityCacheExpirationTime = time.Duration(30) * time.Second + // Init loads data from the write ahead log and prepares the head for writes. // It should be called before using an appender so that it // limits the ingested samples to the head min valid time. @@ -904,11 +533,6 @@ func (h *Head) Init(minValidTime int64) error { return nil } -// SetMinValidTime sets the minimum timestamp the head can ingest. -func (h *Head) SetMinValidTime(minValidTime int64) { - h.minValidTime.Store(minValidTime) -} - func (h *Head) loadMmappedChunks() (map[uint64][]*mmappedChunk, error) { mmappedChunks := map[uint64][]*mmappedChunk{} if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef, chunkRef uint64, mint, maxt int64, numSamples uint16) error { @@ -959,6 +583,70 @@ func (h *Head) removeCorruptedMmappedChunks(err error) map[uint64][]*mmappedChun return mmappedChunks } +func (h *Head) ApplyConfig(cfg *config.Config) error { + if !h.opts.EnableExemplarStorage { + return nil + } + + // Head uses opts.MaxExemplars in combination with opts.EnableExemplarStorage + // to decide if it should pass exemplars along to it's exemplar storage, so we + // need to update opts.MaxExemplars here. + prevSize := h.opts.MaxExemplars.Load() + h.opts.MaxExemplars.Store(cfg.StorageConfig.ExemplarsConfig.MaxExemplars) + + if prevSize == h.opts.MaxExemplars.Load() { + return nil + } + + migrated := h.exemplars.(*CircularExemplarStorage).Resize(h.opts.MaxExemplars.Load()) + level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", h.opts.MaxExemplars, "migrated", migrated) + return nil +} + +// PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. 
+func (h *Head) PostingsCardinalityStats(statsByLabelName string) *index.PostingsStats { + h.cardinalityMutex.Lock() + defer h.cardinalityMutex.Unlock() + currentTime := time.Duration(time.Now().Unix()) * time.Second + seconds := currentTime - h.lastPostingsStatsCall + if seconds > cardinalityCacheExpirationTime { + h.cardinalityCache = nil + } + if h.cardinalityCache != nil { + return h.cardinalityCache + } + h.cardinalityCache = h.postings.Stats(statsByLabelName) + h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second + + return h.cardinalityCache +} + +func (h *Head) updateMinMaxTime(mint, maxt int64) { + for { + lt := h.MinTime() + if mint >= lt { + break + } + if h.minTime.CAS(lt, mint) { + break + } + } + for { + ht := h.MaxTime() + if maxt <= ht { + break + } + if h.maxTime.CAS(ht, maxt) { + break + } + } +} + +// SetMinValidTime sets the minimum timestamp the head can ingest. +func (h *Head) SetMinValidTime(minValidTime int64) { + h.minValidTime.Store(minValidTime) +} + // Truncate removes old data before mint from the head and WAL. func (h *Head) Truncate(mint int64) (err error) { initialize := h.MinTime() == math.MaxInt64 @@ -1186,17 +874,6 @@ func (h *Head) truncateWAL(mint int64) error { return nil } -// initTime initializes a head with the first timestamp. This only needs to be called -// for a completely fresh head with an empty WAL. -func (h *Head) initTime(t int64) { - if !h.minTime.CAS(math.MaxInt64, t) { - return - } - // Ensure that max time is initialized to at least the min time we just set. - // Concurrent appenders may already have set it to a higher value. - h.maxTime.CAS(math.MinInt64, t) -} - type Stats struct { NumSeries uint64 MinTime, MaxTime int64 @@ -1264,432 +941,19 @@ func (h *RangeHead) NumSeries() uint64 { func (h *RangeHead) Meta() BlockMeta { return BlockMeta{ MinTime: h.MinTime(), - MaxTime: h.MaxTime(), - ULID: h.head.Meta().ULID, - Stats: BlockStats{ - NumSeries: h.NumSeries(), - }, - } -} - -// String returns an human readable representation of the range head. It's important to -// keep this function in order to avoid the struct dump when the head is stringified in -// errors or logs. -func (h *RangeHead) String() string { - return fmt.Sprintf("range head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime()) -} - -// initAppender is a helper to initialize the time bounds of the head -// upon the first sample it receives. -type initAppender struct { - app storage.Appender - head *Head -} - -func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { - if a.app != nil { - return a.app.Append(ref, lset, t, v) - } - - a.head.initTime(t) - a.app = a.head.appender() - return a.app.Append(ref, lset, t, v) -} - -func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { - // Check if exemplar storage is enabled. - if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { - return 0, nil - } - - if a.app != nil { - return a.app.AppendExemplar(ref, l, e) - } - // We should never reach here given we would call Append before AppendExemplar - // and we probably want to always base head/WAL min time on sample times. 
- a.head.initTime(e.Ts) - a.app = a.head.appender() - - return a.app.AppendExemplar(ref, l, e) -} - -var _ storage.GetRef = &initAppender{} - -func (a *initAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { - if g, ok := a.app.(storage.GetRef); ok { - return g.GetRef(lset) - } - return 0, nil -} - -func (a *initAppender) Commit() error { - if a.app == nil { - return nil - } - return a.app.Commit() -} - -func (a *initAppender) Rollback() error { - if a.app == nil { - return nil - } - return a.app.Rollback() -} - -// Appender returns a new Appender on the database. -func (h *Head) Appender(_ context.Context) storage.Appender { - h.metrics.activeAppenders.Inc() - - // The head cache might not have a starting point yet. The init appender - // picks up the first appended timestamp as the base. - if h.MinTime() == math.MaxInt64 { - return &initAppender{ - head: h, - } - } - return h.appender() -} - -func (h *Head) appender() *headAppender { - appendID, cleanupAppendIDsBelow := h.iso.newAppendID() - - // Allocate the exemplars buffer only if exemplars are enabled. - var exemplarsBuf []exemplarWithSeriesRef - if h.opts.EnableExemplarStorage { - exemplarsBuf = h.getExemplarBuffer() - } - - return &headAppender{ - head: h, - minValidTime: h.appendableMinValidTime(), - mint: math.MaxInt64, - maxt: math.MinInt64, - samples: h.getAppendBuffer(), - sampleSeries: h.getSeriesBuffer(), - exemplars: exemplarsBuf, - appendID: appendID, - cleanupAppendIDsBelow: cleanupAppendIDsBelow, - } -} - -func (h *Head) appendableMinValidTime() int64 { - // Setting the minimum valid time to whichever is greater, the head min valid time or the compaction window, - // ensures that no samples will be added within the compaction window to avoid races. - return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2) -} - -func max(a, b int64) int64 { - if a > b { - return a - } - return b -} - -func (h *Head) getAppendBuffer() []record.RefSample { - b := h.appendPool.Get() - if b == nil { - return make([]record.RefSample, 0, 512) - } - return b.([]record.RefSample) -} - -func (h *Head) putAppendBuffer(b []record.RefSample) { - //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. - h.appendPool.Put(b[:0]) -} - -func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef { - b := h.exemplarsPool.Get() - if b == nil { - return make([]exemplarWithSeriesRef, 0, 512) - } - return b.([]exemplarWithSeriesRef) -} - -func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { - if b == nil { - return - } - - //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. - h.exemplarsPool.Put(b[:0]) -} - -func (h *Head) getSeriesBuffer() []*memSeries { - b := h.seriesPool.Get() - if b == nil { - return make([]*memSeries, 0, 512) - } - return b.([]*memSeries) -} - -func (h *Head) putSeriesBuffer(b []*memSeries) { - //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. - h.seriesPool.Put(b[:0]) -} - -func (h *Head) getBytesBuffer() []byte { - b := h.bytesPool.Get() - if b == nil { - return make([]byte, 0, 1024) - } - return b.([]byte) -} - -func (h *Head) putBytesBuffer(b []byte) { - //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. 
- h.bytesPool.Put(b[:0]) -} - -type exemplarWithSeriesRef struct { - ref uint64 - exemplar exemplar.Exemplar -} - -type headAppender struct { - head *Head - minValidTime int64 // No samples below this timestamp are allowed. - mint, maxt int64 - - series []record.RefSeries - samples []record.RefSample - exemplars []exemplarWithSeriesRef - sampleSeries []*memSeries - - appendID, cleanupAppendIDsBelow uint64 - closed bool -} - -func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { - if t < a.minValidTime { - a.head.metrics.outOfBoundSamples.Inc() - return 0, storage.ErrOutOfBounds - } - - s := a.head.series.getByID(ref) - if s == nil { - // Ensure no empty labels have gotten through. - lset = lset.WithoutEmpty() - if len(lset) == 0 { - return 0, errors.Wrap(ErrInvalidSample, "empty labelset") - } - - if l, dup := lset.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) - } - - var created bool - var err error - s, created, err = a.head.getOrCreate(lset.Hash(), lset) - if err != nil { - return 0, err - } - if created { - a.series = append(a.series, record.RefSeries{ - Ref: s.ref, - Labels: lset, - }) - } - } - - s.Lock() - if err := s.appendable(t, v); err != nil { - s.Unlock() - if err == storage.ErrOutOfOrderSample { - a.head.metrics.outOfOrderSamples.Inc() - } - return 0, err - } - s.pendingCommit = true - s.Unlock() - - if t < a.mint { - a.mint = t - } - if t > a.maxt { - a.maxt = t - } - - a.samples = append(a.samples, record.RefSample{ - Ref: s.ref, - T: t, - V: v, - }) - a.sampleSeries = append(a.sampleSeries, s) - return s.ref, nil -} - -// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't -// use getOrCreate or make any of the lset sanity checks that Append does. -func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) { - // Check if exemplar storage is enabled. - if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { - return 0, nil - } - s := a.head.series.getByID(ref) - if s == nil { - return 0, fmt.Errorf("unknown series ref. when trying to add exemplar: %d", ref) - } - - // Ensure no empty labels have gotten through. - e.Labels = e.Labels.WithoutEmpty() - - err := a.head.exemplars.ValidateExemplar(s.lset, e) - if err != nil { - if err == storage.ErrDuplicateExemplar || err == storage.ErrExemplarsDisabled { - // Duplicate, don't return an error but don't accept the exemplar. 
- return 0, nil - } - return 0, err - } - - a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) - - return s.ref, nil -} - -var _ storage.GetRef = &headAppender{} - -func (a *headAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { - s := a.head.series.getByHash(lset.Hash(), lset) - if s == nil { - return 0, nil - } - // returned labels must be suitable to pass to Append() - return s.ref, s.lset -} - -func (a *headAppender) log() error { - if a.head.wal == nil { - return nil - } - - buf := a.head.getBytesBuffer() - defer func() { a.head.putBytesBuffer(buf) }() - - var rec []byte - var enc record.Encoder - - if len(a.series) > 0 { - rec = enc.Series(a.series, buf) - buf = rec[:0] - - if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log series") - } - } - if len(a.samples) > 0 { - rec = enc.Samples(a.samples, buf) - buf = rec[:0] - - if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log samples") - } - } - if len(a.exemplars) > 0 { - rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) - buf = rec[:0] - - if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") - } - } - return nil -} - -func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { - ret := make([]record.RefExemplar, 0, len(es)) - for _, e := range es { - ret = append(ret, record.RefExemplar{ - Ref: e.ref, - T: e.exemplar.Ts, - V: e.exemplar.Value, - Labels: e.exemplar.Labels, - }) - } - return ret -} - -func (a *headAppender) Commit() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - - if err := a.log(); err != nil { - _ = a.Rollback() // Most likely the same error will happen again. - return errors.Wrap(err, "write to WAL") - } - - // No errors logging to WAL, so pass the exemplars along to the in memory storage. - for _, e := range a.exemplars { - s := a.head.series.getByID(e.ref) - // We don't instrument exemplar appends here, all is instrumented by storage. 
- if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { - if err == storage.ErrOutOfOrderExemplar { - continue - } - level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) - } - } - - defer a.head.metrics.activeAppenders.Dec() - defer a.head.putAppendBuffer(a.samples) - defer a.head.putSeriesBuffer(a.sampleSeries) - defer a.head.putExemplarBuffer(a.exemplars) - defer a.head.iso.closeAppend(a.appendID) - - total := len(a.samples) - var series *memSeries - for i, s := range a.samples { - series = a.sampleSeries[i] - series.Lock() - ok, chunkCreated := series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper) - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - - if !ok { - total-- - a.head.metrics.outOfOrderSamples.Inc() - } - if chunkCreated { - a.head.metrics.chunks.Inc() - a.head.metrics.chunksCreated.Inc() - } - } - - a.head.metrics.samplesAppended.Add(float64(total)) - a.head.updateMinMaxTime(a.mint, a.maxt) - - return nil -} - -func (a *headAppender) Rollback() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - defer a.head.metrics.activeAppenders.Dec() - defer a.head.iso.closeAppend(a.appendID) - defer a.head.putSeriesBuffer(a.sampleSeries) - - var series *memSeries - for i := range a.samples { - series = a.sampleSeries[i] - series.Lock() - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() + MaxTime: h.MaxTime(), + ULID: h.head.Meta().ULID, + Stats: BlockStats{ + NumSeries: h.NumSeries(), + }, } - a.head.putAppendBuffer(a.samples) - a.head.putExemplarBuffer(a.exemplars) - a.samples = nil - a.exemplars = nil +} - // Series are created in the head memory regardless of rollback. Thus we have - // to log them to the WAL in any case. - return a.log() +// String returns an human readable representation of the range head. It's important to +// keep this function in order to avoid the struct dump when the head is stringified in +// errors or logs. +func (h *RangeHead) String() string { + return fmt.Sprintf("range head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime()) } // Delete all samples in the range of [mint, maxt] for series that satisfy the given @@ -1793,40 +1057,6 @@ func (h *Head) Tombstones() (tombstones.Reader, error) { return h.tombstones, nil } -// Index returns an IndexReader against the block. -func (h *Head) Index() (IndexReader, error) { - return h.indexRange(math.MinInt64, math.MaxInt64), nil -} - -func (h *Head) indexRange(mint, maxt int64) *headIndexReader { - if hmin := h.MinTime(); hmin > mint { - mint = hmin - } - return &headIndexReader{head: h, mint: mint, maxt: maxt} -} - -// Chunks returns a ChunkReader against the block. -func (h *Head) Chunks() (ChunkReader, error) { - return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State(math.MinInt64, math.MaxInt64)) -} - -func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkReader, error) { - h.closedMtx.Lock() - defer h.closedMtx.Unlock() - if h.closed { - return nil, errors.New("can't read from a closed head") - } - if hmin := h.MinTime(); hmin > mint { - mint = hmin - } - return &headChunkReader{ - head: h, - mint: mint, - maxt: maxt, - isoState: is, - }, nil -} - // NumSeries returns the number of active series in the head. 
func (h *Head) NumSeries() uint64 { return h.numSeries.Load() @@ -1883,269 +1113,6 @@ func (h *Head) String() string { return "head" } -type headChunkReader struct { - head *Head - mint, maxt int64 - isoState *isolationState -} - -func (h *headChunkReader) Close() error { - h.isoState.Close() - return nil -} - -// packChunkID packs a seriesID and a chunkID within it into a global 8 byte ID. -// It panicks if the seriesID exceeds 5 bytes or the chunk ID 3 bytes. -func packChunkID(seriesID, chunkID uint64) uint64 { - if seriesID > (1<<40)-1 { - panic("series ID exceeds 5 bytes") - } - if chunkID > (1<<24)-1 { - panic("chunk ID exceeds 3 bytes") - } - return (seriesID << 24) | chunkID -} - -func unpackChunkID(id uint64) (seriesID, chunkID uint64) { - return id >> 24, (id << 40) >> 40 -} - -// Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { - sid, cid := unpackChunkID(ref) - - s := h.head.series.getByID(sid) - // This means that the series has been garbage collected. - if s == nil { - return nil, storage.ErrNotFound - } - - s.Lock() - c, garbageCollect, err := s.chunk(int(cid), h.head.chunkDiskMapper) - if err != nil { - s.Unlock() - return nil, err - } - defer func() { - if garbageCollect { - // Set this to nil so that Go GC can collect it after it has been used. - c.chunk = nil - s.memChunkPool.Put(c) - } - }() - - // This means that the chunk is outside the specified range. - if !c.OverlapsClosedInterval(h.mint, h.maxt) { - s.Unlock() - return nil, storage.ErrNotFound - } - s.Unlock() - - return &safeChunk{ - Chunk: c.chunk, - s: s, - cid: int(cid), - isoState: h.isoState, - chunkDiskMapper: h.head.chunkDiskMapper, - }, nil -} - -type safeChunk struct { - chunkenc.Chunk - s *memSeries - cid int - isoState *isolationState - chunkDiskMapper *chunks.ChunkDiskMapper -} - -func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { - c.s.Lock() - it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, reuseIter) - c.s.Unlock() - return it -} - -type headIndexReader struct { - head *Head - mint, maxt int64 -} - -func (h *headIndexReader) Close() error { - return nil -} - -func (h *headIndexReader) Symbols() index.StringIter { - h.head.symMtx.RLock() - res := make([]string, 0, len(h.head.symbols)) - - for s := range h.head.symbols { - res = append(res, s) - } - h.head.symMtx.RUnlock() - - sort.Strings(res) - return index.NewStringListIter(res) -} - -// SortedLabelValues returns label values present in the head for the -// specific label name that are within the time range mint to maxt. -// If matchers are specified the returned result set is reduced -// to label values of metrics matching the matchers. -func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { - values, err := h.LabelValues(name, matchers...) - if err == nil { - sort.Strings(values) - } - return values, err -} - -// LabelValues returns label values present in the head for the -// specific label name that are within the time range mint to maxt. -// If matchers are specified the returned result set is reduced -// to label values of metrics matching the matchers. 
-func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { - if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { - return []string{}, nil - } - - if len(matchers) == 0 { - h.head.symMtx.RLock() - defer h.head.symMtx.RUnlock() - return h.head.postings.LabelValues(name), nil - } - - return labelValuesWithMatchers(h, name, matchers...) -} - -// LabelNames returns all the unique label names present in the head -// that are within the time range mint to maxt. -func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { - if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { - return []string{}, nil - } - - if len(matchers) == 0 { - h.head.symMtx.RLock() - labelNames := h.head.postings.LabelNames() - h.head.symMtx.RUnlock() - - sort.Strings(labelNames) - return labelNames, nil - } - - return labelNamesWithMatchers(h, matchers...) -} - -// Postings returns the postings list iterator for the label pairs. -func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) { - res := make([]index.Postings, 0, len(values)) - for _, value := range values { - res = append(res, h.head.postings.Get(name, value)) - } - return index.Merge(res...), nil -} - -func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { - series := make([]*memSeries, 0, 128) - - // Fetch all the series only once. - for p.Next() { - s := h.head.series.getByID(p.At()) - if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") - } else { - series = append(series, s) - } - } - if err := p.Err(); err != nil { - return index.ErrPostings(errors.Wrap(err, "expand postings")) - } - - sort.Slice(series, func(i, j int) bool { - return labels.Compare(series[i].lset, series[j].lset) < 0 - }) - - // Convert back to list. - ep := make([]uint64, 0, len(series)) - for _, p := range series { - ep = append(ep, p.ref) - } - return index.NewListPostings(ep) -} - -// Series returns the series for the given reference. -func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { - s := h.head.series.getByID(ref) - - if s == nil { - h.head.metrics.seriesNotFound.Inc() - return storage.ErrNotFound - } - *lbls = append((*lbls)[:0], s.lset...) - - s.Lock() - defer s.Unlock() - - *chks = (*chks)[:0] - - for i, c := range s.mmappedChunks { - // Do not expose chunks that are outside of the specified range. - if !c.OverlapsClosedInterval(h.mint, h.maxt) { - continue - } - *chks = append(*chks, chunks.Meta{ - MinTime: c.minTime, - MaxTime: c.maxTime, - Ref: packChunkID(s.ref, uint64(s.chunkID(i))), - }) - } - if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) { - *chks = append(*chks, chunks.Meta{ - MinTime: s.headChunk.minTime, - MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to). - Ref: packChunkID(s.ref, uint64(s.chunkID(len(s.mmappedChunks)))), - }) - } - - return nil -} - -// LabelValueFor returns label value for the given label name in the series referred to by ID. -func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error) { - memSeries := h.head.series.getByID(id) - if memSeries == nil { - return "", storage.ErrNotFound - } - - value := memSeries.lset.Get(label) - if value == "" { - return "", storage.ErrNotFound - } - - return value, nil -} - -// LabelNamesFor returns all the label names for the series referred to by IDs. -// The names returned are sorted. 
-func (h *headIndexReader) LabelNamesFor(ids ...uint64) ([]string, error) { - namesMap := make(map[string]struct{}) - for _, id := range ids { - memSeries := h.head.series.getByID(id) - if memSeries == nil { - return nil, storage.ErrNotFound - } - for _, lbl := range memSeries.lset { - namesMap[lbl.Name] = struct{}{} - } - } - names := make([]string, 0, len(namesMap)) - for name := range namesMap { - names = append(names, name) - } - sort.Strings(names) - return names, nil -} - func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, error) { // Just using `getOrCreateWithID` below would be semantically sufficient, but we'd create // a new series on every sample inserted via Add(), which causes allocations @@ -2163,7 +1130,7 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, h.chunkRange.Load(), &h.memChunkPool) + return newMemSeries(lset, id, hash, h.chunkRange.Load(), &h.memChunkPool) }) if err != nil { return nil, false, err @@ -2411,6 +1378,7 @@ type memSeries struct { ref uint64 lset labels.Labels + hash uint64 mmappedChunks []*mmappedChunk headChunk *memChunk chunkRange int64 @@ -2427,9 +1395,10 @@ type memSeries struct { txs *txRing } -func newMemSeries(lset labels.Labels, id uint64, chunkRange int64, memChunkPool *sync.Pool) *memSeries { +func newMemSeries(lset labels.Labels, id, hash uint64, chunkRange int64, memChunkPool *sync.Pool) *memSeries { s := &memSeries{ lset: lset, + hash: hash, ref: id, chunkRange: chunkRange, nextAt: math.MinInt64, @@ -2457,104 +1426,6 @@ func (s *memSeries) maxTime() int64 { return c.maxTime } -func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) *memChunk { - s.mmapCurrentHeadChunk(chunkDiskMapper) - - s.headChunk = &memChunk{ - chunk: chunkenc.NewXORChunk(), - minTime: mint, - maxTime: math.MinInt64, - } - - // Set upper bound on when the next chunk must be started. An earlier timestamp - // may be chosen dynamically at a later point. - s.nextAt = rangeForTimestamp(mint, s.chunkRange) - - app, err := s.headChunk.chunk.Appender() - if err != nil { - panic(err) - } - s.app = app - return s.headChunk -} - -func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) { - if s.headChunk == nil { - // There is no head chunk, so nothing to m-map here. - return - } - - chunkRef, err := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk) - if err != nil { - if err != chunks.ErrChunkDiskMapperClosed { - panic(err) - } - } - s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{ - ref: chunkRef, - numSamples: uint16(s.headChunk.chunk.NumSamples()), - minTime: s.headChunk.minTime, - maxTime: s.headChunk.maxTime, - }) -} - -// appendable checks whether the given sample is valid for appending to the series. -func (s *memSeries) appendable(t int64, v float64) error { - c := s.head() - if c == nil { - return nil - } - - if t > c.maxTime { - return nil - } - if t < c.maxTime { - return storage.ErrOutOfOrderSample - } - // We are allowing exact duplicates as we can encounter them in valid cases - // like federation and erroring out at that time would be extremely noisy. 
- if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) { - return storage.ErrDuplicateSampleForTimestamp - } - return nil -} - -// chunk returns the chunk for the chunk id from memory or by m-mapping it from the disk. -// If garbageCollect is true, it means that the returned *memChunk -// (and not the chunkenc.Chunk inside it) can be garbage collected after it's usage. -func (s *memSeries) chunk(id int, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) { - // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are - // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. - // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix - // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. - ix := id - s.firstChunkID - if ix < 0 || ix > len(s.mmappedChunks) { - return nil, false, storage.ErrNotFound - } - if ix == len(s.mmappedChunks) { - if s.headChunk == nil { - return nil, false, errors.New("invalid head chunk") - } - return s.headChunk, false, nil - } - chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) - if err != nil { - if _, ok := err.(*chunks.CorruptionErr); ok { - panic(err) - } - return nil, false, err - } - mc := s.memChunkPool.Get().(*memChunk) - mc.chunk = chk - mc.minTime = s.mmappedChunks[ix].minTime - mc.maxTime = s.mmappedChunks[ix].maxTime - return mc, true, nil -} - -func (s *memSeries) chunkID(pos int) int { - return pos + s.firstChunkID -} - // truncateChunksBefore removes all chunks from the series that // have no timestamp at or after mint. // Chunk IDs remain unchanged. @@ -2580,180 +1451,12 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { return removed } -// append adds the sample (t, v) to the series. The caller also has to provide -// the appendID for isolation. (The appendID can be zero, which results in no -// isolation for this append.) -// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. -func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper) (sampleInOrder, chunkCreated bool) { - // Based on Gorilla white papers this offers near-optimal compression ratio - // so anything bigger that this has diminishing returns and increases - // the time range within which we have to decompress all samples. - const samplesPerChunk = 120 - - c := s.head() - - if c == nil { - if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t { - // Out of order sample. Sample timestamp is already in the mmaped chunks, so ignore it. - return false, false - } - // There is no chunk in this series yet, create the first chunk for the sample. - c = s.cutNewHeadChunk(t, chunkDiskMapper) - chunkCreated = true - } - numSamples := c.chunk.NumSamples() - - // Out of order sample. - if c.maxTime >= t { - return false, chunkCreated - } - // If we reach 25% of a chunk's desired sample count, set a definitive time - // at which to start the next chunk. - // At latest it must happen at the timestamp set when the chunk was cut. 
- if numSamples == samplesPerChunk/4 { - s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) - } - if t >= s.nextAt { - c = s.cutNewHeadChunk(t, chunkDiskMapper) - chunkCreated = true - } - s.app.Append(t, v) - - c.maxTime = t - - s.sampleBuf[0] = s.sampleBuf[1] - s.sampleBuf[1] = s.sampleBuf[2] - s.sampleBuf[2] = s.sampleBuf[3] - s.sampleBuf[3] = sample{t: t, v: v} - - if appendID > 0 { - s.txs.add(appendID) - } - - return true, chunkCreated -} - // cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after // acquiring lock. func (s *memSeries) cleanupAppendIDsBelow(bound uint64) { s.txs.cleanupAppendIDsBelow(bound) } -// computeChunkEndTime estimates the end timestamp based the beginning of a -// chunk, its current timestamp and the upper bound up to which we insert data. -// It assumes that the time range is 1/4 full. -func computeChunkEndTime(start, cur, max int64) int64 { - a := (max - start) / ((cur - start + 1) * 4) - if a == 0 { - return max - } - return start + (max-start)/a -} - -// iterator returns a chunk iterator. -// It is unsafe to call this concurrently with s.append(...) without holding the series lock. -func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) chunkenc.Iterator { - c, garbageCollect, err := s.chunk(id, chunkDiskMapper) - // TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a - // series's chunk, which got then garbage collected before it got - // accessed. We must ensure to not garbage collect as long as any - // readers still hold a reference. - if err != nil { - return chunkenc.NewNopIterator() - } - defer func() { - if garbageCollect { - // Set this to nil so that Go GC can collect it after it has been used. - // This should be done always at the end. - c.chunk = nil - s.memChunkPool.Put(c) - } - }() - - ix := id - s.firstChunkID - - numSamples := c.chunk.NumSamples() - stopAfter := numSamples - - if isoState != nil { - totalSamples := 0 // Total samples in this series. - previousSamples := 0 // Samples before this chunk. - - for j, d := range s.mmappedChunks { - totalSamples += int(d.numSamples) - if j < ix { - previousSamples += int(d.numSamples) - } - } - - if s.headChunk != nil { - totalSamples += s.headChunk.chunk.NumSamples() - } - - // Removing the extra transactionIDs that are relevant for samples that - // come after this chunk, from the total transactionIDs. - appendIDsToConsider := s.txs.txIDCount - (totalSamples - (previousSamples + numSamples)) - - // Iterate over the appendIDs, find the first one that the isolation state says not - // to return. - it := s.txs.iterator() - for index := 0; index < appendIDsToConsider; index++ { - appendID := it.At() - if appendID <= isoState.maxAppendID { // Easy check first. - if _, ok := isoState.incompleteAppends[appendID]; !ok { - it.Next() - continue - } - } - stopAfter = numSamples - (appendIDsToConsider - index) - if stopAfter < 0 { - stopAfter = 0 // Stopped in a previous chunk. 
- } - break - } - } - - if stopAfter == 0 { - return chunkenc.NewNopIterator() - } - - if id-s.firstChunkID < len(s.mmappedChunks) { - if stopAfter == numSamples { - return c.chunk.Iterator(it) - } - if msIter, ok := it.(*stopIterator); ok { - msIter.Iterator = c.chunk.Iterator(msIter.Iterator) - msIter.i = -1 - msIter.stopAfter = stopAfter - return msIter - } - return &stopIterator{ - Iterator: c.chunk.Iterator(it), - i: -1, - stopAfter: stopAfter, - } - } - // Serve the last 4 samples for the last chunk from the sample buffer - // as their compressed bytes may be mutated by added samples. - if msIter, ok := it.(*memSafeIterator); ok { - msIter.Iterator = c.chunk.Iterator(msIter.Iterator) - msIter.i = -1 - msIter.total = numSamples - msIter.stopAfter = stopAfter - msIter.buf = s.sampleBuf - return msIter - } - return &memSafeIterator{ - stopIterator: stopIterator{ - Iterator: c.chunk.Iterator(it), - i: -1, - stopAfter: stopAfter, - }, - total: numSamples, - buf: s.sampleBuf, - } -} - func (s *memSeries) head() *memChunk { return s.headChunk } @@ -2765,64 +1468,11 @@ type memChunk struct { // OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt]. func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool { - return mc.minTime <= maxt && mint <= mc.maxTime -} - -type stopIterator struct { - chunkenc.Iterator - - i, stopAfter int -} - -func (it *stopIterator) Next() bool { - if it.i+1 >= it.stopAfter { - return false - } - it.i++ - return it.Iterator.Next() -} - -type memSafeIterator struct { - stopIterator - - total int - buf [4]sample -} - -func (it *memSafeIterator) Seek(t int64) bool { - if it.Err() != nil { - return false - } - - ts, _ := it.At() - - for t > ts || it.i == -1 { - if !it.Next() { - return false - } - ts, _ = it.At() - } - - return true -} - -func (it *memSafeIterator) Next() bool { - if it.i+1 >= it.stopAfter { - return false - } - it.i++ - if it.total-it.i > 4 { - return it.Iterator.Next() - } - return true + return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) } -func (it *memSafeIterator) At() (int64, float64) { - if it.total-it.i > 4 { - return it.Iterator.At() - } - s := it.buf[4-(it.total-it.i)] - return s.t, s.v +func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool { + return mint1 <= maxt2 && mint2 <= maxt1 } type mmappedChunk struct { @@ -2833,22 +1483,7 @@ type mmappedChunk struct { // Returns true if the chunk overlaps [mint, maxt]. func (mc *mmappedChunk) OverlapsClosedInterval(mint, maxt int64) bool { - return mc.minTime <= maxt && mint <= mc.maxTime -} - -// SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. -// It is always a no-op in Prometheus and mainly meant for external users who import TSDB. -// All the callbacks should be safe to be called concurrently. -// It is up to the user to implement soft or hard consistency by making the callbacks -// atomic or non-atomic. Atomic callbacks can cause degradation performance. -type SeriesLifecycleCallback interface { - // PreCreation is called before creating a series to indicate if the series can be created. - // A non nil error means the series should not be created. - PreCreation(labels.Labels) error - // PostCreation is called after creating a series to indicate a creation of series. - PostCreation(labels.Labels) - // PostDeletion is called after deletion of series. 
- PostDeletion(...labels.Labels) + return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) } type noopSeriesLifecycleCallback struct{} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go new file mode 100644 index 0000000000..85a3ff14a3 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -0,0 +1,583 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "context" + "fmt" + "math" + + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/record" +) + +// initAppender is a helper to initialize the time bounds of the head +// upon the first sample it receives. +type initAppender struct { + app storage.Appender + head *Head +} + +var _ storage.GetRef = &initAppender{} + +func (a *initAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { + if a.app != nil { + return a.app.Append(ref, lset, t, v) + } + + a.head.initTime(t) + a.app = a.head.appender() + return a.app.Append(ref, lset, t, v) +} + +func (a *initAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { + // Check if exemplar storage is enabled. + if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { + return 0, nil + } + + if a.app != nil { + return a.app.AppendExemplar(ref, l, e) + } + // We should never reach here given we would call Append before AppendExemplar + // and we probably want to always base head/WAL min time on sample times. + a.head.initTime(e.Ts) + a.app = a.head.appender() + + return a.app.AppendExemplar(ref, l, e) +} + +// initTime initializes a head with the first timestamp. This only needs to be called +// for a completely fresh head with an empty WAL. +func (h *Head) initTime(t int64) { + if !h.minTime.CAS(math.MaxInt64, t) { + return + } + // Ensure that max time is initialized to at least the min time we just set. + // Concurrent appenders may already have set it to a higher value. + h.maxTime.CAS(math.MinInt64, t) +} + +func (a *initAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { + if g, ok := a.app.(storage.GetRef); ok { + return g.GetRef(lset) + } + return 0, nil +} + +func (a *initAppender) Commit() error { + if a.app == nil { + return nil + } + return a.app.Commit() +} + +func (a *initAppender) Rollback() error { + if a.app == nil { + return nil + } + return a.app.Rollback() +} + +// Appender returns a new Appender on the database. +func (h *Head) Appender(_ context.Context) storage.Appender { + h.metrics.activeAppenders.Inc() + + // The head cache might not have a starting point yet. 
The init appender + // picks up the first appended timestamp as the base. + if h.MinTime() == math.MaxInt64 { + return &initAppender{ + head: h, + } + } + return h.appender() +} + +func (h *Head) appender() *headAppender { + appendID, cleanupAppendIDsBelow := h.iso.newAppendID() + + // Allocate the exemplars buffer only if exemplars are enabled. + var exemplarsBuf []exemplarWithSeriesRef + if h.opts.EnableExemplarStorage { + exemplarsBuf = h.getExemplarBuffer() + } + + return &headAppender{ + head: h, + minValidTime: h.appendableMinValidTime(), + mint: math.MaxInt64, + maxt: math.MinInt64, + samples: h.getAppendBuffer(), + sampleSeries: h.getSeriesBuffer(), + exemplars: exemplarsBuf, + appendID: appendID, + cleanupAppendIDsBelow: cleanupAppendIDsBelow, + } +} + +func (h *Head) appendableMinValidTime() int64 { + // Setting the minimum valid time to whichever is greater, the head min valid time or the compaction window, + // ensures that no samples will be added within the compaction window to avoid races. + return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2) +} + +func max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func (h *Head) getAppendBuffer() []record.RefSample { + b := h.appendPool.Get() + if b == nil { + return make([]record.RefSample, 0, 512) + } + return b.([]record.RefSample) +} + +func (h *Head) putAppendBuffer(b []record.RefSample) { + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.appendPool.Put(b[:0]) +} + +func (h *Head) getExemplarBuffer() []exemplarWithSeriesRef { + b := h.exemplarsPool.Get() + if b == nil { + return make([]exemplarWithSeriesRef, 0, 512) + } + return b.([]exemplarWithSeriesRef) +} + +func (h *Head) putExemplarBuffer(b []exemplarWithSeriesRef) { + if b == nil { + return + } + + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.exemplarsPool.Put(b[:0]) +} + +func (h *Head) getSeriesBuffer() []*memSeries { + b := h.seriesPool.Get() + if b == nil { + return make([]*memSeries, 0, 512) + } + return b.([]*memSeries) +} + +func (h *Head) putSeriesBuffer(b []*memSeries) { + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.seriesPool.Put(b[:0]) +} + +func (h *Head) getBytesBuffer() []byte { + b := h.bytesPool.Get() + if b == nil { + return make([]byte, 0, 1024) + } + return b.([]byte) +} + +func (h *Head) putBytesBuffer(b []byte) { + //nolint:staticcheck // Ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.bytesPool.Put(b[:0]) +} + +type exemplarWithSeriesRef struct { + ref uint64 + exemplar exemplar.Exemplar +} + +type headAppender struct { + head *Head + minValidTime int64 // No samples below this timestamp are allowed. + mint, maxt int64 + + series []record.RefSeries + samples []record.RefSample + exemplars []exemplarWithSeriesRef + sampleSeries []*memSeries + + appendID, cleanupAppendIDsBelow uint64 + closed bool +} + +func (a *headAppender) Append(ref uint64, lset labels.Labels, t int64, v float64) (uint64, error) { + if t < a.minValidTime { + a.head.metrics.outOfBoundSamples.Inc() + return 0, storage.ErrOutOfBounds + } + + s := a.head.series.getByID(ref) + if s == nil { + // Ensure no empty labels have gotten through. 
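+ // In the Prometheus data model a label with an empty value is equivalent to the
+ // label being absent, so such labels are dropped before the labelset is hashed
+ // and the series is created.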
+ lset = lset.WithoutEmpty() + if len(lset) == 0 { + return 0, errors.Wrap(ErrInvalidSample, "empty labelset") + } + + if l, dup := lset.HasDuplicateLabelNames(); dup { + return 0, errors.Wrap(ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, l)) + } + + var created bool + var err error + s, created, err = a.head.getOrCreate(lset.Hash(), lset) + if err != nil { + return 0, err + } + if created { + a.series = append(a.series, record.RefSeries{ + Ref: s.ref, + Labels: lset, + }) + } + } + + s.Lock() + if err := s.appendable(t, v); err != nil { + s.Unlock() + if err == storage.ErrOutOfOrderSample { + a.head.metrics.outOfOrderSamples.Inc() + } + return 0, err + } + s.pendingCommit = true + s.Unlock() + + if t < a.mint { + a.mint = t + } + if t > a.maxt { + a.maxt = t + } + + a.samples = append(a.samples, record.RefSample{ + Ref: s.ref, + T: t, + V: v, + }) + a.sampleSeries = append(a.sampleSeries, s) + return s.ref, nil +} + +// appendable checks whether the given sample is valid for appending to the series. +func (s *memSeries) appendable(t int64, v float64) error { + c := s.head() + if c == nil { + return nil + } + + if t > c.maxTime { + return nil + } + if t < c.maxTime { + return storage.ErrOutOfOrderSample + } + // We are allowing exact duplicates as we can encounter them in valid cases + // like federation and erroring out at that time would be extremely noisy. + if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) { + return storage.ErrDuplicateSampleForTimestamp + } + return nil +} + +// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't +// use getOrCreate or make any of the lset sanity checks that Append does. +func (a *headAppender) AppendExemplar(ref uint64, _ labels.Labels, e exemplar.Exemplar) (uint64, error) { + // Check if exemplar storage is enabled. + if !a.head.opts.EnableExemplarStorage || a.head.opts.MaxExemplars.Load() <= 0 { + return 0, nil + } + s := a.head.series.getByID(ref) + if s == nil { + return 0, fmt.Errorf("unknown series ref. when trying to add exemplar: %d", ref) + } + + // Ensure no empty labels have gotten through. + e.Labels = e.Labels.WithoutEmpty() + + err := a.head.exemplars.ValidateExemplar(s.lset, e) + if err != nil { + if err == storage.ErrDuplicateExemplar || err == storage.ErrExemplarsDisabled { + // Duplicate, don't return an error but don't accept the exemplar. 
+ return 0, nil + } + return 0, err + } + + a.exemplars = append(a.exemplars, exemplarWithSeriesRef{ref, e}) + + return s.ref, nil +} + +var _ storage.GetRef = &headAppender{} + +func (a *headAppender) GetRef(lset labels.Labels) (uint64, labels.Labels) { + s := a.head.series.getByHash(lset.Hash(), lset) + if s == nil { + return 0, nil + } + // returned labels must be suitable to pass to Append() + return s.ref, s.lset +} + +func (a *headAppender) log() error { + if a.head.wal == nil { + return nil + } + + buf := a.head.getBytesBuffer() + defer func() { a.head.putBytesBuffer(buf) }() + + var rec []byte + var enc record.Encoder + + if len(a.series) > 0 { + rec = enc.Series(a.series, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log series") + } + } + if len(a.samples) > 0 { + rec = enc.Samples(a.samples, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log samples") + } + } + if len(a.exemplars) > 0 { + rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log exemplars") + } + } + return nil +} + +func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { + ret := make([]record.RefExemplar, 0, len(es)) + for _, e := range es { + ret = append(ret, record.RefExemplar{ + Ref: e.ref, + T: e.exemplar.Ts, + V: e.exemplar.Value, + Labels: e.exemplar.Labels, + }) + } + return ret +} + +func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + + if err := a.log(); err != nil { + _ = a.Rollback() // Most likely the same error will happen again. + return errors.Wrap(err, "write to WAL") + } + + // No errors logging to WAL, so pass the exemplars along to the in memory storage. + for _, e := range a.exemplars { + s := a.head.series.getByID(e.ref) + // We don't instrument exemplar appends here, all is instrumented by storage. + if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { + if err == storage.ErrOutOfOrderExemplar { + continue + } + level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) + } + } + + defer a.head.metrics.activeAppenders.Dec() + defer a.head.putAppendBuffer(a.samples) + defer a.head.putSeriesBuffer(a.sampleSeries) + defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.iso.closeAppend(a.appendID) + + total := len(a.samples) + var series *memSeries + for i, s := range a.samples { + series = a.sampleSeries[i] + series.Lock() + ok, chunkCreated := series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper) + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + + if !ok { + total-- + a.head.metrics.outOfOrderSamples.Inc() + } + if chunkCreated { + a.head.metrics.chunks.Inc() + a.head.metrics.chunksCreated.Inc() + } + } + + a.head.metrics.samplesAppended.Add(float64(total)) + a.head.updateMinMaxTime(a.mint, a.maxt) + + return nil +} + +// append adds the sample (t, v) to the series. The caller also has to provide +// the appendID for isolation. (The appendID can be zero, which results in no +// isolation for this append.) +// It is unsafe to call this concurrently with s.iterator(...) without holding the series lock. 
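+// It returns whether the sample was appended in order and whether a new head chunk had to be cut for it.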
+func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper *chunks.ChunkDiskMapper) (sampleInOrder, chunkCreated bool) { + // Based on Gorilla white papers this offers near-optimal compression ratio + // so anything bigger that this has diminishing returns and increases + // the time range within which we have to decompress all samples. + const samplesPerChunk = 120 + + c := s.head() + + if c == nil { + if len(s.mmappedChunks) > 0 && s.mmappedChunks[len(s.mmappedChunks)-1].maxTime >= t { + // Out of order sample. Sample timestamp is already in the mmaped chunks, so ignore it. + return false, false + } + // There is no chunk in this series yet, create the first chunk for the sample. + c = s.cutNewHeadChunk(t, chunkDiskMapper) + chunkCreated = true + } + numSamples := c.chunk.NumSamples() + + // Out of order sample. + if c.maxTime >= t { + return false, chunkCreated + } + // If we reach 25% of a chunk's desired sample count, predict an end time + // for this chunk that will try to make samples equally distributed within + // the remaining chunks in the current chunk range. + // At latest it must happen at the timestamp set when the chunk was cut. + if numSamples == samplesPerChunk/4 { + s.nextAt = computeChunkEndTime(c.minTime, c.maxTime, s.nextAt) + } + if t >= s.nextAt { + c = s.cutNewHeadChunk(t, chunkDiskMapper) + chunkCreated = true + } + s.app.Append(t, v) + + c.maxTime = t + + s.sampleBuf[0] = s.sampleBuf[1] + s.sampleBuf[1] = s.sampleBuf[2] + s.sampleBuf[2] = s.sampleBuf[3] + s.sampleBuf[3] = sample{t: t, v: v} + + if appendID > 0 { + s.txs.add(appendID) + } + + return true, chunkCreated +} + +// computeChunkEndTime estimates the end timestamp based the beginning of a +// chunk, its current timestamp and the upper bound up to which we insert data. +// It assumes that the time range is 1/4 full. +// Assuming that the samples will keep arriving at the same rate, it will make the +// remaining n chunks within this chunk range (before max) equally sized. +func computeChunkEndTime(start, cur, max int64) int64 { + n := (max - start) / ((cur - start + 1) * 4) + if n <= 1 { + return max + } + return start + (max-start)/n +} + +func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) *memChunk { + s.mmapCurrentHeadChunk(chunkDiskMapper) + + s.headChunk = &memChunk{ + chunk: chunkenc.NewXORChunk(), + minTime: mint, + maxTime: math.MinInt64, + } + + // Set upper bound on when the next chunk must be started. An earlier timestamp + // may be chosen dynamically at a later point. + s.nextAt = rangeForTimestamp(mint, s.chunkRange) + + app, err := s.headChunk.chunk.Appender() + if err != nil { + panic(err) + } + s.app = app + return s.headChunk +} + +func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) { + if s.headChunk == nil { + // There is no head chunk, so nothing to m-map here. 
+ return + } + + chunkRef, err := chunkDiskMapper.WriteChunk(s.ref, s.headChunk.minTime, s.headChunk.maxTime, s.headChunk.chunk) + if err != nil { + if err != chunks.ErrChunkDiskMapperClosed { + panic(err) + } + } + s.mmappedChunks = append(s.mmappedChunks, &mmappedChunk{ + ref: chunkRef, + numSamples: uint16(s.headChunk.chunk.NumSamples()), + minTime: s.headChunk.minTime, + maxTime: s.headChunk.maxTime, + }) +} + +func (a *headAppender) Rollback() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + defer a.head.metrics.activeAppenders.Dec() + defer a.head.iso.closeAppend(a.appendID) + defer a.head.putSeriesBuffer(a.sampleSeries) + + var series *memSeries + for i := range a.samples { + series = a.sampleSeries[i] + series.Lock() + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } + a.head.putAppendBuffer(a.samples) + a.head.putExemplarBuffer(a.exemplars) + a.samples = nil + a.exemplars = nil + + // Series are created in the head memory regardless of rollback. Thus we have + // to log them to the WAL in any case. + return a.log() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go new file mode 100644 index 0000000000..fecb5b5fe5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -0,0 +1,553 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "context" + "math" + "sort" + + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" +) + +func (h *Head) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { + return h.exemplars.ExemplarQuerier(ctx) +} + +// Index returns an IndexReader against the block. +func (h *Head) Index() (IndexReader, error) { + return h.indexRange(math.MinInt64, math.MaxInt64), nil +} + +func (h *Head) indexRange(mint, maxt int64) *headIndexReader { + if hmin := h.MinTime(); hmin > mint { + mint = hmin + } + return &headIndexReader{head: h, mint: mint, maxt: maxt} +} + +type headIndexReader struct { + head *Head + mint, maxt int64 +} + +func (h *headIndexReader) Close() error { + return nil +} + +func (h *headIndexReader) Symbols() index.StringIter { + h.head.symMtx.RLock() + res := make([]string, 0, len(h.head.symbols)) + + for s := range h.head.symbols { + res = append(res, s) + } + h.head.symMtx.RUnlock() + + sort.Strings(res) + return index.NewStringListIter(res) +} + +// SortedLabelValues returns label values present in the head for the +// specific label name that are within the time range mint to maxt. +// If matchers are specified the returned result set is reduced +// to label values of metrics matching the matchers. 
+func (h *headIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + values, err := h.LabelValues(name, matchers...) + if err == nil { + sort.Strings(values) + } + return values, err +} + +// LabelValues returns label values present in the head for the +// specific label name that are within the time range mint to maxt. +// If matchers are specified the returned result set is reduced +// to label values of metrics matching the matchers. +func (h *headIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { + return []string{}, nil + } + + if len(matchers) == 0 { + h.head.symMtx.RLock() + defer h.head.symMtx.RUnlock() + return h.head.postings.LabelValues(name), nil + } + + return labelValuesWithMatchers(h, name, matchers...) +} + +// LabelNames returns all the unique label names present in the head +// that are within the time range mint to maxt. +func (h *headIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { + if h.maxt < h.head.MinTime() || h.mint > h.head.MaxTime() { + return []string{}, nil + } + + if len(matchers) == 0 { + h.head.symMtx.RLock() + labelNames := h.head.postings.LabelNames() + h.head.symMtx.RUnlock() + + sort.Strings(labelNames) + return labelNames, nil + } + + return labelNamesWithMatchers(h, matchers...) +} + +// Postings returns the postings list iterator for the label pairs. +func (h *headIndexReader) Postings(name string, values ...string) (index.Postings, error) { + res := make([]index.Postings, 0, len(values)) + for _, value := range values { + res = append(res, h.head.postings.Get(name, value)) + } + return index.Merge(res...), nil +} + +func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { + series := make([]*memSeries, 0, 128) + + // Fetch all the series only once. + for p.Next() { + s := h.head.series.getByID(p.At()) + if s == nil { + level.Debug(h.head.logger).Log("msg", "Looked up series not found") + } else { + series = append(series, s) + } + } + if err := p.Err(); err != nil { + return index.ErrPostings(errors.Wrap(err, "expand postings")) + } + + sort.Slice(series, func(i, j int) bool { + return labels.Compare(series[i].lset, series[j].lset) < 0 + }) + + // Convert back to list. + ep := make([]uint64, 0, len(series)) + for _, p := range series { + ep = append(ep, p.ref) + } + return index.NewListPostings(ep) +} + +func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { + out := make([]uint64, 0, 128) + + for p.Next() { + s := h.head.series.getByID(p.At()) + if s == nil { + level.Debug(h.head.logger).Log("msg", "Looked up series not found") + continue + } + + // Check if the series belong to the shard. + if s.hash%shardCount != shardIndex { + continue + } + + out = append(out, s.ref) + } + + return index.NewListPostings(out) +} + +// Series returns the series for the given reference. +// Chunks are skipped if chks is nil. +func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { + s := h.head.series.getByID(ref) + + if s == nil { + h.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + *lbls = append((*lbls)[:0], s.lset...) + + if chks == nil { + return nil + } + + s.Lock() + defer s.Unlock() + + *chks = (*chks)[:0] + + for i, c := range s.mmappedChunks { + // Do not expose chunks that are outside of the specified range. 
+ if !c.OverlapsClosedInterval(h.mint, h.maxt) { + continue + } + *chks = append(*chks, chunks.Meta{ + MinTime: c.minTime, + MaxTime: c.maxTime, + Ref: packChunkID(s.ref, uint64(s.chunkID(i))), + }) + } + if s.headChunk != nil && s.headChunk.OverlapsClosedInterval(h.mint, h.maxt) { + *chks = append(*chks, chunks.Meta{ + MinTime: s.headChunk.minTime, + MaxTime: math.MaxInt64, // Set the head chunks as open (being appended to). + Ref: packChunkID(s.ref, uint64(s.chunkID(len(s.mmappedChunks)))), + }) + } + + return nil +} + +func (s *memSeries) chunkID(pos int) int { + return pos + s.firstChunkID +} + +// LabelValueFor returns label value for the given label name in the series referred to by ID. +func (h *headIndexReader) LabelValueFor(id uint64, label string) (string, error) { + memSeries := h.head.series.getByID(id) + if memSeries == nil { + return "", storage.ErrNotFound + } + + value := memSeries.lset.Get(label) + if value == "" { + return "", storage.ErrNotFound + } + + return value, nil +} + +// LabelNamesFor returns all the label names for the series referred to by IDs. +// The names returned are sorted. +func (h *headIndexReader) LabelNamesFor(ids ...uint64) ([]string, error) { + namesMap := make(map[string]struct{}) + for _, id := range ids { + memSeries := h.head.series.getByID(id) + if memSeries == nil { + return nil, storage.ErrNotFound + } + for _, lbl := range memSeries.lset { + namesMap[lbl.Name] = struct{}{} + } + } + names := make([]string, 0, len(namesMap)) + for name := range namesMap { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +// Chunks returns a ChunkReader against the block. +func (h *Head) Chunks() (ChunkReader, error) { + return h.chunksRange(math.MinInt64, math.MaxInt64, h.iso.State(math.MinInt64, math.MaxInt64)) +} + +func (h *Head) chunksRange(mint, maxt int64, is *isolationState) (*headChunkReader, error) { + h.closedMtx.Lock() + defer h.closedMtx.Unlock() + if h.closed { + return nil, errors.New("can't read from a closed head") + } + if hmin := h.MinTime(); hmin > mint { + mint = hmin + } + return &headChunkReader{ + head: h, + mint: mint, + maxt: maxt, + isoState: is, + }, nil +} + +type headChunkReader struct { + head *Head + mint, maxt int64 + isoState *isolationState +} + +func (h *headChunkReader) Close() error { + h.isoState.Close() + return nil +} + +// packChunkID packs a seriesID and a chunkID within it into a global 8 byte ID. +// It panicks if the seriesID exceeds 5 bytes or the chunk ID 3 bytes. +func packChunkID(seriesID, chunkID uint64) uint64 { + if seriesID > (1<<40)-1 { + panic("series ID exceeds 5 bytes") + } + if chunkID > (1<<24)-1 { + panic("chunk ID exceeds 3 bytes") + } + return (seriesID << 24) | chunkID +} + +func unpackChunkID(id uint64) (seriesID, chunkID uint64) { + return id >> 24, (id << 40) >> 40 +} + +// Chunk returns the chunk for the reference number. +func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { + sid, cid := unpackChunkID(ref) + + s := h.head.series.getByID(sid) + // This means that the series has been garbage collected. + if s == nil { + return nil, storage.ErrNotFound + } + + s.Lock() + c, garbageCollect, err := s.chunk(int(cid), h.head.chunkDiskMapper) + if err != nil { + s.Unlock() + return nil, err + } + defer func() { + if garbageCollect { + // Set this to nil so that Go GC can collect it after it has been used. + c.chunk = nil + s.memChunkPool.Put(c) + } + }() + + // This means that the chunk is outside the specified range. 
+ if !c.OverlapsClosedInterval(h.mint, h.maxt) { + s.Unlock() + return nil, storage.ErrNotFound + } + s.Unlock() + + return &safeChunk{ + Chunk: c.chunk, + s: s, + cid: int(cid), + isoState: h.isoState, + chunkDiskMapper: h.head.chunkDiskMapper, + }, nil +} + +// chunk returns the chunk for the chunk id from memory or by m-mapping it from the disk. +// If garbageCollect is true, it means that the returned *memChunk +// (and not the chunkenc.Chunk inside it) can be garbage collected after it's usage. +func (s *memSeries) chunk(id int, chunkDiskMapper *chunks.ChunkDiskMapper) (chunk *memChunk, garbageCollect bool, err error) { + // ix represents the index of chunk in the s.mmappedChunks slice. The chunk id's are + // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. + // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix + // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. + ix := id - s.firstChunkID + if ix < 0 || ix > len(s.mmappedChunks) { + return nil, false, storage.ErrNotFound + } + if ix == len(s.mmappedChunks) { + if s.headChunk == nil { + return nil, false, errors.New("invalid head chunk") + } + return s.headChunk, false, nil + } + chk, err := chunkDiskMapper.Chunk(s.mmappedChunks[ix].ref) + if err != nil { + if _, ok := err.(*chunks.CorruptionErr); ok { + panic(err) + } + return nil, false, err + } + mc := s.memChunkPool.Get().(*memChunk) + mc.chunk = chk + mc.minTime = s.mmappedChunks[ix].minTime + mc.maxTime = s.mmappedChunks[ix].maxTime + return mc, true, nil +} + +type safeChunk struct { + chunkenc.Chunk + s *memSeries + cid int + isoState *isolationState + chunkDiskMapper *chunks.ChunkDiskMapper +} + +func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { + c.s.Lock() + it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, reuseIter) + c.s.Unlock() + return it +} + +// iterator returns a chunk iterator. +// It is unsafe to call this concurrently with s.append(...) without holding the series lock. +func (s *memSeries) iterator(id int, isoState *isolationState, chunkDiskMapper *chunks.ChunkDiskMapper, it chunkenc.Iterator) chunkenc.Iterator { + c, garbageCollect, err := s.chunk(id, chunkDiskMapper) + // TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a + // series's chunk, which got then garbage collected before it got + // accessed. We must ensure to not garbage collect as long as any + // readers still hold a reference. + if err != nil { + return chunkenc.NewNopIterator() + } + defer func() { + if garbageCollect { + // Set this to nil so that Go GC can collect it after it has been used. + // This should be done always at the end. + c.chunk = nil + s.memChunkPool.Put(c) + } + }() + + ix := id - s.firstChunkID + + numSamples := c.chunk.NumSamples() + stopAfter := numSamples + + if isoState != nil { + totalSamples := 0 // Total samples in this series. + previousSamples := 0 // Samples before this chunk. + + for j, d := range s.mmappedChunks { + totalSamples += int(d.numSamples) + if j < ix { + previousSamples += int(d.numSamples) + } + } + + if s.headChunk != nil { + totalSamples += s.headChunk.chunk.NumSamples() + } + + // Removing the extra transactionIDs that are relevant for samples that + // come after this chunk, from the total transactionIDs. 
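+ // For illustration, assuming two m-mapped chunks of 100 samples each plus a
+ // 50-sample head chunk: totalSamples is 250. When reading the second m-mapped
+ // chunk, previousSamples is 100 and numSamples is 100, so the 50 samples after
+ // this chunk (and their append IDs) are excluded from consideration below.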
+ appendIDsToConsider := s.txs.txIDCount - (totalSamples - (previousSamples + numSamples)) + + // Iterate over the appendIDs, find the first one that the isolation state says not + // to return. + it := s.txs.iterator() + for index := 0; index < appendIDsToConsider; index++ { + appendID := it.At() + if appendID <= isoState.maxAppendID { // Easy check first. + if _, ok := isoState.incompleteAppends[appendID]; !ok { + it.Next() + continue + } + } + stopAfter = numSamples - (appendIDsToConsider - index) + if stopAfter < 0 { + stopAfter = 0 // Stopped in a previous chunk. + } + break + } + } + + if stopAfter == 0 { + return chunkenc.NewNopIterator() + } + + if id-s.firstChunkID < len(s.mmappedChunks) { + if stopAfter == numSamples { + return c.chunk.Iterator(it) + } + if msIter, ok := it.(*stopIterator); ok { + msIter.Iterator = c.chunk.Iterator(msIter.Iterator) + msIter.i = -1 + msIter.stopAfter = stopAfter + return msIter + } + return &stopIterator{ + Iterator: c.chunk.Iterator(it), + i: -1, + stopAfter: stopAfter, + } + } + // Serve the last 4 samples for the last chunk from the sample buffer + // as their compressed bytes may be mutated by added samples. + if msIter, ok := it.(*memSafeIterator); ok { + msIter.Iterator = c.chunk.Iterator(msIter.Iterator) + msIter.i = -1 + msIter.total = numSamples + msIter.stopAfter = stopAfter + msIter.buf = s.sampleBuf + return msIter + } + return &memSafeIterator{ + stopIterator: stopIterator{ + Iterator: c.chunk.Iterator(it), + i: -1, + stopAfter: stopAfter, + }, + total: numSamples, + buf: s.sampleBuf, + } +} + +type memSafeIterator struct { + stopIterator + + total int + buf [4]sample +} + +func (it *memSafeIterator) Seek(t int64) bool { + if it.Err() != nil { + return false + } + + ts, _ := it.At() + + for t > ts || it.i == -1 { + if !it.Next() { + return false + } + ts, _ = it.At() + } + + return true +} + +func (it *memSafeIterator) Next() bool { + if it.i+1 >= it.stopAfter { + return false + } + it.i++ + if it.total-it.i > 4 { + return it.Iterator.Next() + } + return true +} + +func (it *memSafeIterator) At() (int64, float64) { + if it.total-it.i > 4 { + return it.Iterator.At() + } + s := it.buf[4-(it.total-it.i)] + return s.t, s.v +} + +type stopIterator struct { + chunkenc.Iterator + + i, stopAfter int +} + +func (it *stopIterator) Next() bool { + if it.i+1 >= it.stopAfter { + return false + } + it.i++ + return it.Iterator.Next() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go new file mode 100644 index 0000000000..056bd288a1 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -0,0 +1,388 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tsdb + +import ( + "fmt" + "math" + "runtime" + "sync" + "time" + + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/tombstones" + "github.com/prometheus/prometheus/tsdb/wal" +) + +func (h *Head) loadWAL(r *wal.Reader, multiRef map[uint64]uint64, mmappedChunks map[uint64][]*mmappedChunk) (err error) { + // Track number of samples that referenced a series we don't know about + // for error reporting. + var unknownRefs atomic.Uint64 + var unknownExemplarRefs atomic.Uint64 + + // Start workers that each process samples for a partition of the series ID space. + // They are connected through a ring of channels which ensures that all sample batches + // read from the WAL are processed in order. + var ( + wg sync.WaitGroup + n = runtime.GOMAXPROCS(0) + inputs = make([]chan []record.RefSample, n) + outputs = make([]chan []record.RefSample, n) + exemplarsInput chan record.RefExemplar + + dec record.Decoder + shards = make([][]record.RefSample, n) + + decoded = make(chan interface{}, 10) + decodeErr, seriesCreationErr error + seriesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + tstonesPool = sync.Pool{ + New: func() interface{} { + return []tombstones.Stone{} + }, + } + exemplarsPool = sync.Pool{ + New: func() interface{} { + return []record.RefExemplar{} + }, + } + ) + + defer func() { + // For CorruptionErr ensure to terminate all workers before exiting. + _, ok := err.(*wal.CorruptionErr) + if ok || seriesCreationErr != nil { + for i := 0; i < n; i++ { + close(inputs[i]) + for range outputs[i] { + } + } + close(exemplarsInput) + wg.Wait() + } + }() + + wg.Add(n) + for i := 0; i < n; i++ { + outputs[i] = make(chan []record.RefSample, 300) + inputs[i] = make(chan []record.RefSample, 300) + + go func(input <-chan []record.RefSample, output chan<- []record.RefSample) { + unknown := h.processWALSamples(h.minValidTime.Load(), input, output) + unknownRefs.Add(unknown) + wg.Done() + }(inputs[i], outputs[i]) + } + + wg.Add(1) + exemplarsInput = make(chan record.RefExemplar, 300) + go func(input <-chan record.RefExemplar) { + defer wg.Done() + for e := range input { + ms := h.series.getByID(e.Ref) + if ms == nil { + unknownExemplarRefs.Inc() + continue + } + + if e.T < h.minValidTime.Load() { + continue + } + // At the moment the only possible error here is out of order exemplars, which we shouldn't see when + // replaying the WAL, so lets just log the error if it's not that type. 
+ err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) + if err != nil && err == storage.ErrOutOfOrderExemplar { + level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + } + } + }(exemplarsInput) + + go func() { + defer close(decoded) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, err = dec.Series(rec, series) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode series"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- series + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- samples + case record.Tombstones: + tstones := tstonesPool.Get().([]tombstones.Stone)[:0] + tstones, err = dec.Tombstones(rec, tstones) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode tombstones"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- tstones + case record.Exemplars: + exemplars := exemplarsPool.Get().([]record.RefExemplar)[:0] + exemplars, err = dec.Exemplars(rec, exemplars) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode exemplars"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- exemplars + default: + // Noop. + } + } + }() + + // The records are always replayed from the oldest to the newest. +Outer: + for d := range decoded { + switch v := d.(type) { + case []record.RefSeries: + for _, walSeries := range v { + mSeries, created, err := h.getOrCreateWithID(walSeries.Ref, walSeries.Labels.Hash(), walSeries.Labels) + if err != nil { + seriesCreationErr = err + break Outer + } + + if h.lastSeriesID.Load() < walSeries.Ref { + h.lastSeriesID.Store(walSeries.Ref) + } + + mmc := mmappedChunks[walSeries.Ref] + + if created { + // This is the first WAL series record for this series. + h.metrics.chunksCreated.Add(float64(len(mmc))) + h.metrics.chunks.Add(float64(len(mmc))) + mSeries.mmappedChunks = mmc + continue + } + + // There's already a different ref for this series. + // A duplicate series record is only possible when the old samples were already compacted into a block. + // Hence we can discard all the samples and m-mapped chunks replayed till now for this series. + + multiRef[walSeries.Ref] = mSeries.ref + + idx := mSeries.ref % uint64(n) + // It is possible that some old sample is being processed in processWALSamples that + // could cause race below. So we wait for the goroutine to empty input the buffer and finish + // processing all old samples after emptying the buffer. + inputs[idx] <- []record.RefSample{} + for len(inputs[idx]) != 0 { + time.Sleep(1 * time.Millisecond) + } + + // Checking if the new m-mapped chunks overlap with the already existing ones. + // This should never happen, but we have a check anyway to detect any + // edge cases that we might have missed. + if len(mSeries.mmappedChunks) > 0 && len(mmc) > 0 { + if overlapsClosedInterval( + mSeries.mmappedChunks[0].minTime, + mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime, + mmc[0].minTime, + mmc[len(mmc)-1].maxTime, + ) { + // The m-map chunks for the new series ref overlaps with old m-map chunks. 
+ seriesCreationErr = errors.Errorf("overlapping m-mapped chunks for series %s", mSeries.lset.String()) + break Outer + } + } + + // Replacing m-mapped chunks with the new ones (could be empty). + h.metrics.chunksCreated.Add(float64(len(mmc))) + h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks))) + h.metrics.chunks.Add(float64(len(mmc) - len(mSeries.mmappedChunks))) + mSeries.mmappedChunks = mmc + + // Any samples replayed till now would already be compacted. Resetting the head chunk. + mSeries.nextAt = 0 + mSeries.headChunk = nil + mSeries.app = nil + h.updateMinMaxTime(mSeries.minTime(), mSeries.maxTime()) + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + seriesPool.Put(v) + case []record.RefSample: + samples := v + // We split up the samples into chunks of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < n; i++ { + var buf []record.RefSample + select { + case buf = <-outputs[i]: + default: + } + shards[i] = buf[:0] + } + for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := sam.Ref % uint64(n) + shards[mod] = append(shards[mod], sam) + } + for i := 0; i < n; i++ { + inputs[i] <- shards[i] + } + samples = samples[m:] + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + samplesPool.Put(v) + case []tombstones.Stone: + for _, s := range v { + for _, itv := range s.Intervals { + if itv.Maxt < h.minValidTime.Load() { + continue + } + if m := h.series.getByID(s.Ref); m == nil { + unknownRefs.Inc() + continue + } + h.tombstones.AddInterval(s.Ref, itv) + } + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + tstonesPool.Put(v) + case []record.RefExemplar: + for _, e := range v { + exemplarsInput <- e + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + exemplarsPool.Put(v) + default: + panic(fmt.Errorf("unexpected decoded type: %T", d)) + } + } + + if decodeErr != nil { + return decodeErr + } + if seriesCreationErr != nil { + // Drain the channel to unblock the goroutine. + for range decoded { + } + return seriesCreationErr + } + + // Signal termination to each worker and wait for it to close its output channel. + for i := 0; i < n; i++ { + close(inputs[i]) + for range outputs[i] { + } + } + close(exemplarsInput) + wg.Wait() + + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + + if unknownRefs.Load() > 0 || unknownExemplarRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load()) + } + return nil +} + +// processWALSamples adds a partition of samples it receives to the head and passes +// them on to other workers. +// Samples before the mint timestamp are discarded. +func (h *Head) processWALSamples( + minValidTime int64, + input <-chan []record.RefSample, output chan<- []record.RefSample, +) (unknownRefs uint64) { + defer close(output) + + // Mitigate lock contention in getByID. 
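+ // Each worker only receives samples for series refs assigned to it (ref modulo
+ // the worker count, see loadWAL above), so a local ref->series cache avoids
+ // repeated lookups in the shared series map.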
+ refSeries := map[uint64]*memSeries{} + + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + + for samples := range input { + for _, s := range samples { + if s.T < minValidTime { + continue + } + ms := refSeries[s.Ref] + if ms == nil { + ms = h.series.getByID(s.Ref) + if ms == nil { + unknownRefs++ + continue + } + refSeries[s.Ref] = ms + } + if _, chunkCreated := ms.append(s.T, s.V, 0, h.chunkDiskMapper); chunkCreated { + h.metrics.chunksCreated.Inc() + h.metrics.chunks.Inc() + } + if s.T > maxt { + maxt = s.T + } + if s.T < mint { + mint = s.T + } + } + output <- samples + } + h.updateMinMaxTime(mint, maxt) + + return unknownRefs +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index a7b9e57ef4..b46c107e9c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -1587,6 +1587,7 @@ func (r *Reader) LabelValueFor(id uint64, label string) (string, error) { } // Series reads the series with the given ID and writes its labels and chunks into lbls and chks. +// Chunks will be skipped if chks is nil. func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded @@ -1707,6 +1708,33 @@ func (r *Reader) SortedPostings(p Postings) Postings { return p } +// ShardedPostings returns a postings list filtered by the provided shardIndex out of shardCount. +func (r *Reader) ShardedPostings(p Postings, shardIndex, shardCount uint64) Postings { + var ( + out = make([]uint64, 0, 128) + bufLbls = make(labels.Labels, 0, 10) + ) + + for p.Next() { + id := p.At() + + // Get the series labels (no chunks). + err := r.Series(id, &bufLbls, nil) + if err != nil { + return ErrPostings(errors.Errorf("series %d not found", id)) + } + + // Check if the series belong to the shard. + if bufLbls.Hash()%shardCount != shardIndex { + continue + } + + out = append(out, id) + } + + return NewListPostings(out) +} + // Size returns the size of an index file. func (r *Reader) Size() int64 { return int64(r.b.Len()) @@ -1820,9 +1848,12 @@ func (dec *Decoder) LabelValueFor(b []byte, label string) (string, error) { } // Series decodes a series entry from the given byte slice into lset and chks. +// Skips reading chunks metadata if chks is nil. func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error { *lbls = (*lbls)[:0] - *chks = (*chks)[:0] + if chks != nil { + *chks = (*chks)[:0] + } d := encoding.Decbuf{B: b} @@ -1848,11 +1879,16 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) } + // Skip reading chunks metadata if chks is nil. + if chks == nil { + return d.Err() + } + // Read the chunks meta data. k = d.Uvarint() if k == 0 { - return nil + return d.Err() } t0 := d.Varint64() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go index 523f938968..ca6ce1980f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/isolation.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/isolation.go @@ -144,7 +144,7 @@ func (i *isolation) TraverseOpenReads(f func(s *isolationState) bool) { // newAppendID increments the transaction counter and returns a new transaction // ID. The first ID returned is 1. 
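The new `ShardedPostings` keeps only the series whose label-set hash falls into the requested shard (`hash(labels) % shardCount == shardIndex`), and the querier applies it whenever the select hints carry a shard count. A minimal, standalone sketch of that same hash-mod partitioning follows; the tiny label map, the FNV hash, and the sample series are illustrative stand-ins, not the library's own types.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// labelsHash stands in for labels.Labels.Hash(): any stable hash over the
// full label set works for this illustration (the real code uses xxhash).
func labelsHash(lbls map[string]string) uint64 {
	h := fnv.New64a()
	for _, k := range []string{"__name__", "instance", "job"} { // fixed order keeps the hash deterministic
		h.Write([]byte(k))
		h.Write([]byte{0xff})
		h.Write([]byte(lbls[k]))
		h.Write([]byte{0xff})
	}
	return h.Sum64()
}

func main() {
	const shardCount = 4
	series := []map[string]string{
		{"__name__": "up", "job": "api", "instance": "a:9090"},
		{"__name__": "up", "job": "api", "instance": "b:9090"},
		{"__name__": "up", "job": "db", "instance": "c:9090"},
	}

	// Same test ShardedPostings applies to every posting: keep a series
	// only if its hash lands in the requested shard.
	for shardIndex := uint64(0); shardIndex < shardCount; shardIndex++ {
		for _, s := range series {
			if labelsHash(s)%shardCount == shardIndex {
				fmt.Printf("shard %d gets %v\n", shardIndex, s)
			}
		}
	}
}
```

Because the hash is computed over the full label set, each series lands in exactly one shard, so querying all `shardCount` shards yields a complete, non-overlapping result.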
-// Also returns the low watermark, to keep lock/unlock operations down +// Also returns the low watermark, to keep lock/unlock operations down. func (i *isolation) newAppendID() (uint64, uint64) { i.appendMtx.Lock() defer i.appendMtx.Unlock() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 18a1fd20a5..0cab141ee0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -127,6 +127,9 @@ func (q *blockQuerier) Select(sortSeries bool, hints *storage.SelectHints, ms .. if err != nil { return storage.ErrSeriesSet(err) } + if hints != nil && hints.ShardCount > 0 { + p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + } if sortSeries { p = q.index.SortedPostings(p) } @@ -168,6 +171,9 @@ func (q *blockChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, if err != nil { return storage.ErrChunkSeriesSet(err) } + if hints != nil && hints.ShardCount > 0 { + p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + } if sortSeries { p = q.index.SortedPostings(p) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go index fda5ad1e62..accefa7aaf 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go @@ -48,7 +48,10 @@ type WriteTo interface { Append([]record.RefSample) bool AppendExemplars([]record.RefExemplar) bool StoreSeries([]record.RefSeries, int) - // SeriesReset is called after reading a checkpoint to allow the deletion + // Next two methods are intended for garbage-collection: first we call + // UpdateSeriesSegment on all current series + UpdateSeriesSegment([]record.RefSeries, int) + // Then SeriesReset is called to allow the deletion // of all series created in a segment lower than the argument. SeriesReset(int) } @@ -234,7 +237,7 @@ func (w *Watcher) Run() error { } if err == nil { - if err = w.readCheckpoint(lastCheckpoint); err != nil { + if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil { return errors.Wrap(err, "readCheckpoint") } } @@ -454,7 +457,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) - if err = w.readCheckpoint(dir); err != nil { + if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { return errors.Wrap(err, "readCheckpoint") } @@ -463,6 +466,8 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { return nil } +// Read from a segment and pass the details to w.writer. +// Also used with readCheckpoint - implements segmentReadFn. func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { var ( dec record.Decoder @@ -538,6 +543,39 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) } +// Go through all series in a segment updating the segmentNum, so we can delete older series. +// Used with readCheckpoint - implements segmentReadFn. 
+func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error { + var ( + dec record.Decoder + series []record.RefSeries + ) + for r.Next() && !isClosed(w.quit) { + rec := r.Record() + w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc() + + switch dec.Type(rec) { + case record.Series: + series, err := dec.Series(rec, series[:0]) + if err != nil { + w.recordDecodeFailsMetric.Inc() + return err + } + w.writer.UpdateSeriesSegment(series, segmentNum) + + // Ignore these; we're only interested in series. + case record.Samples: + case record.Exemplars: + case record.Tombstones: + + default: + // Could be corruption, or reading from a WAL from a newer Prometheus. + w.recordDecodeFailsMetric.Inc() + } + } + return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) +} + func (w *Watcher) SetStartTime(t time.Time) { w.startTime = t w.startTimestamp = timestamp.FromTime(t) @@ -556,8 +594,10 @@ func recordType(rt record.Type) string { } } +type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) error + // Read all the series records from a Checkpoint directory. -func (w *Watcher) readCheckpoint(checkpointDir string) error { +func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error { level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { @@ -582,7 +622,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string) error { defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := w.readSegment(r, index, false); errors.Cause(err) != io.EOF && err != nil { + if err := readFn(w, r, index, false); errors.Cause(err) != io.EOF && err != nil { return errors.Wrap(err, "readSegment") } diff --git a/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go b/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go new file mode 100644 index 0000000000..224dffe7c4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go @@ -0,0 +1,63 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package osutil + +import ( + "encoding" + "net" + "os" +) + +// GetFQDN returns a FQDN if it's possible, otherwise falls back to hostname. +func GetFQDN() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", err + } + + ips, err := net.LookupIP(hostname) + if err != nil { + // Return the system hostname if we can't look up the IP address. 
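With `readCheckpoint` now taking a `segmentReadFn`, the same checkpoint-reading loop serves two purposes: the normal path replays records via `readSegment`, while the garbage-collection path only stamps each series with the checkpoint's segment number via `UpdateSeriesSegment` and then calls `SeriesReset`. A rough sketch of a writer that honours this two-step protocol follows; the `gcWriter` type, its map, and the simplified `refSeries` struct are illustrative and not part of the patch.

```go
package main

import "fmt"

// refSeries stands in for record.RefSeries: just the series reference here.
type refSeries struct {
	Ref uint64
}

// gcWriter sketches the garbage-collection part of the WriteTo contract:
// UpdateSeriesSegment records the newest segment a series was seen in,
// SeriesReset then drops everything created before the given segment.
type gcWriter struct {
	seriesSegment map[uint64]int // series ref -> last segment it appeared in
}

func (w *gcWriter) UpdateSeriesSegment(series []refSeries, segment int) {
	for _, s := range series {
		w.seriesSegment[s.Ref] = segment
	}
}

func (w *gcWriter) SeriesReset(segment int) {
	for ref, seg := range w.seriesSegment {
		if seg < segment {
			delete(w.seriesSegment, ref) // not seen since before this segment
		}
	}
}

func main() {
	w := &gcWriter{seriesSegment: map[uint64]int{}}
	// Checkpoint replay: live series get stamped with the checkpoint's segment.
	w.UpdateSeriesSegment([]refSeries{{Ref: 1}, {Ref: 2}}, 7)
	w.UpdateSeriesSegment([]refSeries{{Ref: 3}}, 5) // stale entry from an older segment
	// Afterwards everything below the checkpoint segment can be dropped.
	w.SeriesReset(7)
	fmt.Println(len(w.seriesSegment), "series kept") // 2 series kept
}
```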
+ return hostname, nil + } + + lookup := func(ipStr encoding.TextMarshaler) (string, error) { + ip, err := ipStr.MarshalText() + if err != nil { + return "", err + } + hosts, err := net.LookupAddr(string(ip)) + if err != nil || len(hosts) == 0 { + return "", err + } + return hosts[0], nil + } + + for _, addr := range ips { + if ip := addr.To4(); ip != nil { + if fqdn, err := lookup(ip); err == nil { + return fqdn, nil + } + + } + + if ip := addr.To16(); ip != nil { + if fqdn, err := lookup(ip); err == nil { + return fqdn, nil + } + + } + } + return hostname, nil +} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore new file mode 100644 index 0000000000..9c1d986118 --- /dev/null +++ b/vendor/github.com/spf13/afero/.gitignore @@ -0,0 +1,2 @@ +sftpfs/file1 +sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml index 0637db726d..fdaa99980c 100644 --- a/vendor/github.com/spf13/afero/.travis.yml +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -1,21 +1,22 @@ -sudo: false -language: go - -go: - - 1.9 - - "1.10" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build - - go test -race -v ./... - +sudo: false +language: go + +go: + - "1.13" + - "1.14" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build -v ./... + - go test -count=1 -cover -race -v ./... + - go vet ./... + - FILES=$(gofmt -s -l . zipfs sftpfs mem); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 0c9b04b53f..16b06f2ba0 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -6,7 +6,7 @@ A FileSystem Abstraction System for Go # Overview -Afero is an filesystem framework providing a simple, uniform and universal API +Afero is a filesystem framework providing a simple, uniform and universal API interacting with any filesystem, as an abstraction layer providing interfaces, types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. @@ -18,7 +18,7 @@ and benefit of the os and ioutil packages. Afero provides significant improvements over using the os package alone, most notably the ability to create mock and testing filesystems without relying on the disk. -It is suitable for use in a any situation where you would consider using the OS +It is suitable for use in any situation where you would consider using the OS package as it provides an additional abstraction that makes it easy to use a memory backed file system during testing. It also adds support for the http filesystem for full interoperability. @@ -41,8 +41,8 @@ Afero is easy to use and easier to adopt. A few different ways you could use Afero: -* Use the interfaces alone to define you own file system. -* Wrap for the OS packages. +* Use the interfaces alone to define your own file system. +* Wrapper for the OS packages. * Define different filesystems for different parts of your application. * Use Afero for mock filesystems while testing @@ -380,7 +380,6 @@ The following is a short list of possible backends we hope someone will implement: * SSH -* ZIP * TAR * S3 @@ -406,28 +405,7 @@ Googles very well. 
## Release Notes -* **0.10.0** 2015.12.10 - * Full compatibility with Windows - * Introduction of afero utilities - * Test suite rewritten to work cross platform - * Normalize paths for MemMapFs - * Adding Sync to the file interface - * **Breaking Change** Walk and ReadDir have changed parameter order - * Moving types used by MemMapFs to a subpackage - * General bugfixes and improvements -* **0.9.0** 2015.11.05 - * New Walk function similar to filepath.Walk - * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC - * MemMapFs.Remove now really deletes the file - * InMemoryFile.Readdir and Readdirnames work correctly - * InMemoryFile functions lock it for concurrent access - * Test suite improvements -* **0.8.0** 2014.10.28 - * First public version - * Interfaces feel ready for people to build using - * Interfaces satisfy all known uses - * MemMapFs passes the majority of the OS test suite - * OsFs passes the majority of the OS test suite +See the [Releases Page](https://github.com/spf13/afero/releases). ## Contributing diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml index a633ad500c..5d2f34bf16 100644 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -10,6 +10,6 @@ build_script: go get -v github.com/spf13/afero/... - go build github.com/spf13/afero + go build -v github.com/spf13/afero/... test_script: -- cmd: go test -race -v github.com/spf13/afero/... +- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 616ff8ff74..3a14b833e4 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -177,4 +177,30 @@ func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { return fi, false, err } +func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { + oldname, err := b.RealPath(oldname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + newname, err = b.RealPath(newname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + if linker, ok := b.source.(Linker); ok { + return linker.SymlinkIfPossible(oldname, newname) + } + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { + name, err := b.RealPath(name) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + if reader, ok := b.source.(LinkReader); ok { + return reader.ReadlinkIfPossible(name) + } + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + // vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 5728243d96..18b45824be 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
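The README's central promise, swapping a memory-backed filesystem for the OS one so tests never touch the disk, looks roughly like this in practice. This is a small sketch against the afero API; the file name, contents, and the `save` helper are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// save is written against the afero.Fs interface, so production code can
// pass afero.NewOsFs() while tests pass an in-memory filesystem.
func save(fs afero.Fs, name string, data []byte) error {
	return afero.WriteFile(fs, name, data, 0o644)
}

func main() {
	fs := afero.NewMemMapFs() // nothing here touches the real disk
	if err := save(fs, "/tmp/example.txt", []byte("hello")); err != nil {
		panic(err)
	}
	got, _ := afero.ReadFile(fs, "/tmp/example.txt")
	fmt.Println(string(got)) // hello
}
```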
-// +build darwin openbsd freebsd netbsd dragonfly +// +build aix darwin openbsd freebsd netbsd dragonfly package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 968fc2783e..2b850e4ddb 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -15,6 +15,7 @@ // +build !freebsd // +build !dragonfly // +build !netbsd +// +build !aix package afero diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index e8108a851e..96b7701261 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -117,6 +117,26 @@ func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) return fi, false, err } +func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { + if slayer, ok := u.layer.(Linker); ok { + return slayer.SymlinkIfPossible(oldname, newname) + } + + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { + if rlayer, ok := u.layer.(LinkReader); ok { + return rlayer.ReadlinkIfPossible(name) + } + + if rbase, ok := u.base.(LinkReader); ok { + return rbase.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + func (u *CopyOnWriteFs) isNotExist(err error) bool { if e, ok := err.(*os.PathError); ok { err = e.Err diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod index 0868550995..abe4fe1cfd 100644 --- a/vendor/github.com/spf13/afero/go.mod +++ b/vendor/github.com/spf13/afero/go.mod @@ -1,3 +1,9 @@ module github.com/spf13/afero -require golang.org/x/text v0.3.0 +require ( + github.com/pkg/sftp v1.10.1 + golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 + golang.org/x/text v0.3.3 +) + +go 1.13 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum index 6bad37b2a7..89d9bfbc41 100644 --- a/vendor/github.com/spf13/afero/go.sum +++ b/vendor/github.com/spf13/afero/go.sum @@ -1,2 +1,29 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index 5c3a3d8fff..a403133e27 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -22,6 +22,7 @@ import ( "path/filepath" "sort" "strconv" + "strings" "sync" "time" ) @@ -147,7 +148,7 @@ func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) } -func nextSuffix() string { +func nextRandom() string { randmu.Lock() r := rand if r == 0 { @@ -159,27 +160,36 @@ func nextSuffix() string { return strconv.Itoa(int(1e9 + r%1e9))[1:] } -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. +// TempFile creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting *os.File. +// The filename is generated by taking pattern and adding a random +// string to the end. If pattern includes a "*", the random string +// replaces the last "*". // If dir is the empty string, TempFile uses the default directory // for temporary files (see os.TempDir). // Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility // to remove the file when no longer needed. 
-func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) +func (a Afero) TempFile(dir, pattern string) (f File, err error) { + return TempFile(a.Fs, dir, pattern) } -func TempFile(fs Fs, dir, prefix string) (f File, err error) { +func TempFile(fs Fs, dir, pattern string) (f File, err error) { if dir == "" { dir = os.TempDir() } + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + nconflict := 0 for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) + name := filepath.Join(dir, prefix+nextRandom()+suffix) f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if os.IsExist(err) { if nconflict++; nconflict > 10 { @@ -211,7 +221,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { nconflict := 0 for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) + try := filepath.Join(dir, prefix+nextRandom()) err = fs.Mkdir(try, 0700) if os.IsExist(err) { if nconflict++; nconflict > 10 { diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go index c18a87fb71..7db4b7de6e 100644 --- a/vendor/github.com/spf13/afero/match.go +++ b/vendor/github.com/spf13/afero/match.go @@ -106,5 +106,5 @@ func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { // recognized by Match. func hasMeta(path string) bool { // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 + return strings.ContainsAny(path, "*?[") } diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 7af2fb56ff..699f1fb024 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -193,8 +193,11 @@ func (f *File) Read(b []byte) (n int, err error) { } func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + prev := atomic.LoadInt64(&f.at) atomic.StoreInt64(&f.at, off) - return f.Read(b) + n, err = f.Read(b) + atomic.StoreInt64(&f.at, prev) + return } func (f *File) Truncate(size int64) error { @@ -233,6 +236,9 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { + if f.closed == true { + return 0, ErrFileClosed + } if f.readOnly { return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} } diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 09498e70fb..bbcc238126 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -25,6 +25,8 @@ import ( "github.com/spf13/afero/mem" ) +const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() + type MemMapFs struct { mu sync.RWMutex data map[string]*mem.FileData @@ -40,7 +42,9 @@ func (m *MemMapFs) getData() map[string]*mem.FileData { m.data = make(map[string]*mem.FileData) // Root should always exist, right? // TODO: what about windows? 
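The rewritten `TempFile` adopts the `ioutil.TempFile`-style pattern semantics: the random string is appended to the pattern, unless the pattern contains a `*`, in which case the random string replaces the last `*` and any suffix is preserved. A quick usage sketch follows; the printed names are examples only, since the random component and the temp directory vary.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// No "*": the random string is appended after the whole pattern.
	f1, _ := afero.TempFile(fs, "", "report-")
	fmt.Println(f1.Name()) // e.g. /tmp/report-054003078

	// With "*": the random string replaces the last "*", keeping the suffix.
	f2, _ := afero.TempFile(fs, "", "report-*.json")
	fmt.Println(f2.Name()) // e.g. /tmp/report-054003079.json
}
```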
- m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + root := mem.CreateDir(FilePathSeparator) + mem.SetMode(root, os.ModeDir|0755) + m.data[FilePathSeparator] = root }) return m.data } @@ -52,7 +56,7 @@ func (m *MemMapFs) Create(name string) (File, error) { m.mu.Lock() file := mem.CreateFile(name) m.getData()[name] = file - m.registerWithParent(file) + m.registerWithParent(file, 0) m.mu.Unlock() return mem.NewFileHandle(file), nil } @@ -83,14 +87,14 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } -func (m *MemMapFs) registerWithParent(f *mem.FileData) { +func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return } parent := m.findParent(f) if parent == nil { pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) + err := m.lockfreeMkdir(pdir, perm) if err != nil { //log.Println("Mkdir error:", err) return @@ -119,13 +123,15 @@ func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { } } else { item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item - m.registerWithParent(item) + m.registerWithParent(item, perm) } return nil } func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + perm &= chmodBits name = normalizePath(name) m.mu.RLock() @@ -137,13 +143,12 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { m.mu.Lock() item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item - m.registerWithParent(item) + m.registerWithParent(item, perm) m.mu.Unlock() - m.Chmod(name, perm|os.ModeDir) - - return nil + return m.setFileMode(name, perm|os.ModeDir) } func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { @@ -210,8 +215,12 @@ func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { } func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + perm &= chmodBits chmod := false file, err := m.openWrite(name) + if err == nil && (flag&os.O_EXCL > 0) { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} + } if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { file, err = m.Create(name) chmod = true @@ -237,7 +246,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro } } if chmod { - m.Chmod(name, perm) + return file, m.setFileMode(name, perm) } return file, nil } @@ -269,7 +278,7 @@ func (m *MemMapFs) RemoveAll(path string) error { m.mu.RLock() defer m.mu.RUnlock() - for p, _ := range m.getData() { + for p := range m.getData() { if strings.HasPrefix(p, path) { m.mu.RUnlock() m.mu.Lock() @@ -299,7 +308,7 @@ func (m *MemMapFs) Rename(oldname, newname string) error { delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData - m.registerWithParent(fileData) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() } else { @@ -318,6 +327,21 @@ func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { } func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + mode &= chmodBits + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits + + mode = prevOtherBits | mode + return m.setFileMode(name, mode) +} + +func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { name = normalizePath(name) m.mu.RLock() diff --git a/vendor/github.com/spf13/afero/os.go 
b/vendor/github.com/spf13/afero/os.go index 13cc1b84c9..4761db5d72 100644 --- a/vendor/github.com/spf13/afero/os.go +++ b/vendor/github.com/spf13/afero/os.go @@ -99,3 +99,11 @@ func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { fi, err := os.Lstat(name) return fi, true, err } + +func (OsFs) SymlinkIfPossible(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (OsFs) ReadlinkIfPossible(name string) (string, error) { + return os.Readlink(name) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go index c6376ec373..f94b181b6c 100644 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -44,6 +44,18 @@ func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { return fi, false, err } +func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { + if srdr, ok := r.source.(LinkReader); ok { + return srdr.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + func (r *ReadOnlyFs) Rename(o, n string) error { return syscall.EPERM } diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go index 9d92dbc051..c8fc008670 100644 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -126,6 +126,9 @@ func (r *RegexpFs) Open(name string) (File, error) { } } f, err := r.source.Open(name) + if err != nil { + return nil, err + } return &RegexpFile{f: f, re: r.re}, nil } diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go new file mode 100644 index 0000000000..d1c6ea53d9 --- /dev/null +++ b/vendor/github.com/spf13/afero/symlink.go @@ -0,0 +1,55 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" +) + +// Symlinker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It indicates support for 3 symlink related interfaces that implement the +// behaviors of the os methods: +// - Lstat +// - Symlink, and +// - Readlink +type Symlinker interface { + Lstater + Linker + LinkReader +} + +// Linker is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, +// or the filesystem otherwise supports Symlink's. +type Linker interface { + SymlinkIfPossible(oldname, newname string) error +} + +// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system +// does not support Symlink's either directly or through its delegated filesystem. +// As expressed by support for the Linker interface. 
+var ErrNoSymlink = errors.New("symlink not supported") + +// LinkReader is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +type LinkReader interface { + ReadlinkIfPossible(name string) (string, error) +} + +// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system +// does not support the readlink operation either directly or through its delegated filesystem. +// As expressed by support for the LinkReader interface. +var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 0ccbe9b4c2..07ac694aef 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -816,7 +816,7 @@ func (sc *serverConn) serve() { }) sc.unackedSettings++ - // Each connection starts with intialWindowSize inflow tokens. + // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 7bd4b9c197..b97adff7d0 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -264,9 +264,8 @@ type ClientConn struct { peerMaxHeaderListSize uint64 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred @@ -913,46 +912,6 @@ func (cc *ClientConn) closeForLostPing() error { return cc.closeForError(err) } -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") @@ -1295,11 +1254,35 @@ var ( errReqBodyTooLong = errors.New("http2: request body larger than specified content length") ) +// frameScratchBufferLen returns the length of a buffer to use for +// outgoing request bodies to read/write to/from. +// +// It returns max(1, min(peer's advertised max frame size, +// Request.ContentLength+1, 512KB)). 
+func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { + const max = 512 << 10 + n := int64(maxFrameSize) + if n > max { + n = max + } + if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n { + // Add an extra byte past the declared content-length to + // give the caller's Request.Body io.Reader a chance to + // give us more bytes than they declared, so we can catch it + // early. + n = cl + 1 + } + if n < 1 { + return 1 + } + return int(n) // doesn't truncate; max is 512K +} + +var bufPool sync.Pool // of *[]byte + func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) defer func() { traceWroteRequest(cs.trace, err) @@ -1318,9 +1301,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( remainLen := actualContentLength(req) hasContentLen := remainLen != -1 + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + // Scratch buffer for reading into & writing from. + scratchLen := cs.frameScratchBufferLen(maxFrameSize) + var buf []byte + if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPool.Put(bp) + buf = *bp + } else { + buf = make([]byte, scratchLen) + defer bufPool.Put(&buf) + } + var sawEOF bool for !sawEOF { - n, err := body.Read(buf[:len(buf)-1]) + n, err := body.Read(buf[:len(buf)]) if hasContentLen { remainLen -= int64(n) if remainLen == 0 && err == nil { @@ -1331,8 +1329,9 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( // to send the END_STREAM bit early, double-check that we're actually // at EOF. Subsequent reads should return (0, EOF) at this point. // If either value is different, we return an error in one of two ways below. + var scratch [1]byte var n1 int - n1, err = body.Read(buf[n:]) + n1, err = body.Read(scratch[:]) remainLen -= int64(n1) } if remainLen < 0 { @@ -1402,10 +1401,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } - cc.mu.Lock() - maxFrameSize := int(cc.maxFrameSize) - cc.mu.Unlock() - cc.wmu.Lock() defer cc.wmu.Unlock() diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index b241c728a6..8e6a57ce96 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -4,9 +4,9 @@ // Package google provides support for making OAuth2 authorized and authenticated // HTTP requests to Google APIs. It supports the Web server flow, client-side -// credentials, service accounts, Google Compute Engine service accounts, Google -// App Engine service accounts and workload identity federation from non-Google -// cloud platforms. +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. // // A brief overview of the package follows. 
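The new buffer sizing boils down to `max(1, min(maxFrameSize, ContentLength+1, 512KiB))`; the extra byte past the declared Content-Length is what lets the transport catch a request body that turns out to be longer than declared. For instance, with a peer max frame size of 16KiB and a declared Content-Length of 10 bytes the scratch buffer is 11 bytes, with an unknown length (-1) it is 16KiB, and a 1MiB frame size is clamped to 512KiB. Below is a standalone restatement of that arithmetic, mirroring the patched function rather than calling into the http2 package.

```go
package main

import "fmt"

// scratchLen mirrors the sizing rule from the patch:
// max(1, min(maxFrameSize, contentLength+1, 512KiB)).
// contentLength == -1 means "unknown", as in actualContentLength.
func scratchLen(maxFrameSize int, contentLength int64) int {
	const max = 512 << 10
	n := int64(maxFrameSize)
	if n > max {
		n = max
	}
	if contentLength != -1 && contentLength+1 < n {
		n = contentLength + 1 // one extra byte to detect over-long bodies
	}
	if n < 1 {
		return 1
	}
	return int(n)
}

func main() {
	fmt.Println(scratchLen(16<<10, 10)) // 11
	fmt.Println(scratchLen(16<<10, -1)) // 16384
	fmt.Println(scratchLen(1<<20, -1))  // 524288 (clamped to 512KiB)
}
```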
For more information, please read // https://developers.google.com/accounts/docs/OAuth2 diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index fbcefb474e..a5a5423c65 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -13,7 +13,6 @@ import ( "encoding/json" "errors" "fmt" - "golang.org/x/oauth2" "io" "io/ioutil" "net/http" @@ -23,6 +22,8 @@ import ( "sort" "strings" "time" + + "golang.org/x/oauth2" ) type awsSecurityCredentials struct { @@ -343,6 +344,9 @@ func (cs *awsCredentialSource) getRegion() (string, error) { if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { return envAwsRegion, nil } + if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { + return envAwsRegion, nil + } if cs.RegionURL == "" { return "", errors.New("oauth2/google: unable to determine AWS region") diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 1a6e93cec7..a4d45d9202 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -20,15 +20,34 @@ var now = func() time.Time { // Config stores the configuration for fetching tokens with external credentials. type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload + // identity pool or the workforce pool and the provider identifier in that pool. Audience string + // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec + // e.g. `urn:ietf:params:oauth:token-type:jwt`. SubjectTokenType string + // TokenURL is the STS token exchange endpoint. TokenURL string + // TokenInfoURL is the token_info endpoint used to retrieve the account related information ( + // user attributes like account identifier, eg. email, username, uid, etc). This is + // needed for gCloud session account identification. TokenInfoURL string + // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only + // required for workload identity pools when APIs to be accessed have not integrated with UberMint. ServiceAccountImpersonationURL string + // ClientSecret is currently only required if token_info endpoint also + // needs to be called with the generated GCP access token. When provided, STS will be + // called with additional basic authentication using client_id as username and client_secret as password. ClientSecret string + // ClientID is only required in conjunction with ClientSecret, as described above. ClientID string + // CredentialSource contains the necessary information to retrieve the token itself, as well + // as some environmental information. CredentialSource CredentialSource + // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries + // will set the x-goog-user-project which overrides the project associated with the credentials. QuotaProjectID string + // Scopes contains the desired scopes for the returned access token. Scopes []string } @@ -66,6 +85,8 @@ type format struct { } // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. 
+// Either the File or the URL field should be filled, depending on the kind of credential in question. +// The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { File string `json:"file"` @@ -107,7 +128,7 @@ type baseCredentialSource interface { subjectToken() (string, error) } -// tokenSource is the source that handles external credentials. +// tokenSource is the source that handles external credentials. It is used to retrieve Tokens. type tokenSource struct { ctx context.Context conf *Config diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go index feccf8b68e..62c2e36cc1 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go @@ -19,6 +19,9 @@ type clientAuthentication struct { ClientSecret string } +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go index 1d29c467f7..1f6009b38f 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go @@ -36,7 +36,7 @@ type impersonateTokenSource struct { scopes []string } -// Token performs the exchange to get a temporary service account +// Token performs the exchange to get a temporary service account token to allow access to GCP. func (its impersonateTokenSource) Token() (*oauth2.Token, error) { reqBody := generateAccessTokenReq{ Lifetime: "3600s", diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go index b0fdb3a888..67d97b9904 100644 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -7,6 +7,7 @@ package google import ( "crypto/rsa" "fmt" + "strings" "time" "golang.org/x/oauth2" @@ -24,6 +25,28 @@ import ( // optimization supported by a few Google services. // Unless you know otherwise, you should use JWTConfigFromJSON instead. func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + cfg, err := JWTConfigFromJSON(jsonKey) if err != nil { return nil, fmt.Errorf("google: could not parse JSON key: %v", err) @@ -35,6 +58,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token ts := &jwtAccessTokenSource{ email: cfg.Email, audience: audience, + scopes: scopes, pk: pk, pkID: cfg.PrivateKeyID, } @@ -47,6 +71,7 @@ func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.Token type jwtAccessTokenSource struct { email, audience string + scopes []string pk *rsa.PrivateKey pkID string } @@ -54,12 +79,14 @@ type jwtAccessTokenSource struct { func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { iat := time.Now() exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), } hdr := &jws.Header{ Algorithm: "RS256", diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 3f670faba3..6e6afcaa1d 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -563,6 +563,7 @@ ccflags="$@" $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /^AUDIT_RECORD_MAGIC/ && diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9945e5f965..23f6b57606 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,6 +13,7 @@ package unix import ( + "fmt" "runtime" "syscall" "unsafe" @@ -398,6 +399,38 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } + + // The actual call may return less than the original reported required + // size so ensure we deal with that. 
+ return buf[:n/SizeofKinfoProc], nil +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) /* diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 991996b609..5bb48ef54e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e644eaf5e7..11e5709797 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1262,6 +1262,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIMESTAMP_MONOTONIC = 0x4 + SEEK_CUR = 0x1 + SEEK_DATA = 0x4 + SEEK_END = 0x2 + SEEK_HOLE = 0x3 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 9c7c5e1654..440900112c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1297,6 +1297,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index b265abb25f..64520d3122 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 3df99f285f..99e9a0e06e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1276,6 +1276,11 @@ const ( SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 218d39906d..4c83771149 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -1298,6 +1298,11 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c3fa22486e..52f5bbc14c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2284,6 +2284,12 @@ const ( SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 SECURITYFS_MAGIC = 0x73636673 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + 
SEEK_HOLE = 0x4 + SEEK_MAX = 0x4 + SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 2673e6c590..4c8dc0ba2e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 1465cbcffe..96f0e6ae2a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -535,3 +535,107 @@ type CtlInfo struct { Id uint32 Name [96]byte } + +const SizeofKinfoProc = 0x288 + +type Eproc struct { + Paddr uintptr + Sess uintptr + Pcred Pcred + Ucred Ucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess uintptr + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + _ [4]byte +} + +type ExternProc struct { + P_starttime Timeval + P_vmspace *Vmspace + P_sigacts uintptr + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + User_stack *int8 + Exit_thread *byte + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + P_wchan *byte + P_wmesg *int8 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + P_tracep uintptr + P_siglist int32 + P_textvp uintptr + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + P_pgrp uintptr + P_addr uintptr + P_xstat uint16 + P_acflag uint16 + P_ru *Rusage +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} 
+ +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Pcred struct { + Pc_lock [72]int8 + Pc_ucred uintptr + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + _ [4]byte +} + +type Ucred struct { + Ref int32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 72887abe55..c9d7eb41e3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1773,6 +1773,8 @@ const ( NFPROTO_NUMPROTO = 0xd ) +const SO_ORIGINAL_DST = 0x50 + type Nfgenmsg struct { Nfgen_family uint8 Version uint8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1f733398ee..17f03312df 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -680,7 +680,7 @@ const ( WTD_CHOICE_CERT = 5 WTD_STATEACTION_IGNORE = 0x00000000 - WTD_STATEACTION_VERIFY = 0x00000010 + WTD_STATEACTION_VERIFY = 0x00000001 WTD_STATEACTION_CLOSE = 0x00000002 WTD_STATEACTION_AUTO_CACHE = 0x00000003 WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004 diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index c0f7a5733b..0cfcc8463c 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -364,20 +364,13 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, last = now } - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - // Calculate the new number of tokens, due to time that passed. + elapsed := now.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens } @@ -385,15 +378,11 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) + return time.Duration(float64(time.Second) * seconds) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - // Split the integer and fractional parts ourself to minimize rounding errors. - // See golang.org/issues/34861. 
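The simplified rate-limiter conversions below are plain unit arithmetic: `tokens = d.Seconds() * limit` and `duration = (tokens / limit)` seconds, with the old overflow guard and the manual integer/fraction split removed. For example, at 10 tokens per second, 250ms is worth 2.5 tokens and 4 tokens take 400ms to accumulate. The following sketch restates those formulas in a standalone program, using a local `limit` type rather than calling golang.org/x/time/rate.

```go
package main

import (
	"fmt"
	"time"
)

type limit float64 // tokens per second, like rate.Limit

// tokensFromDuration mirrors the simplified conversion in the patch.
func (l limit) tokensFromDuration(d time.Duration) float64 {
	return d.Seconds() * float64(l)
}

// durationFromTokens mirrors the inverse conversion.
func (l limit) durationFromTokens(tokens float64) time.Duration {
	return time.Duration(float64(time.Second) * (tokens / float64(l)))
}

func main() {
	l := limit(10)
	fmt.Println(l.tokensFromDuration(250 * time.Millisecond)) // 2.5
	fmt.Println(l.durationFromTokens(4))                      // 400ms
}
```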
- sec := float64(d/time.Second) * float64(limit) - nsec := float64(d%time.Second) * float64(limit) - return sec + nsec/1e9 + return d.Seconds() * float64(limit) } diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index b921eac736..a2907aa026 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -948,7 +948,7 @@ "parameterOrder": [], "parameters": { "filter": { - "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter strings: | Filter | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` | : : has the value `red` and its : : : label`size` has the value : : : `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that are | : : pending deletion. : If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. 
NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", "location": "query", "type": "string" }, @@ -1171,7 +1171,7 @@ } } }, - "revision": "20210331", + "revision": "20210613", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 298a2a3692..15b3f1bf94 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -2464,7 +2464,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2611,7 +2611,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2758,7 +2758,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2902,7 +2902,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3069,7 +3069,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3236,7 +3236,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] 
= v } @@ -3378,7 +3378,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3509,7 +3509,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3656,7 +3656,7 @@ func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3827,7 +3827,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4001,7 +4001,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4139,7 +4139,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4294,7 +4294,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4437,7 +4437,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4587,7 +4587,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4734,7 +4734,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4878,7 +4878,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5045,7 +5045,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5209,7 +5209,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5367,7 +5367,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5512,7 +5512,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5659,7 +5659,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5802,7 +5802,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5951,7 +5951,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6089,7 +6089,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6233,7 +6233,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6373,7 +6373,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6520,7 +6520,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6671,7 +6671,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6817,7 +6817,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6947,25 +6947,25 @@ func (r *ProjectsService) List() *ProjectsListCall { // query will return results that match any of the fields. 
Some eligible // fields for filtering are: + `name` + `id` + `labels.` (where *key* is // the name of a label) + `parent.type` + `parent.id` + `lifecycleState` -// Some examples of filter strings: | Filter | Description | +// Some examples of filter queries: | Query | Description | // |------------------|-------------------------------------------------- // ---| | name:how* | The project's name starts with "how". | | // name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | // Equivalent to above. | | NAME:howl | Equivalent to above. | | // labels.color:* | The project has the label `color`. | | // labels.color:red | The project's label `color` has the value `red`. | -// | labels.color:red labels.size:big | The project's label `color` | : -// : has the value `red` and its : : : label`size` has the value : : : -// `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that -// are | : : pending deletion. : If no filter is specified, the call -// will return projects for which the user has the -// `resourcemanager.projects.get` permission. NOTE: To perform a -// by-parent query (eg., what projects are directly in a Folder), the -// caller must have the `resourcemanager.projects.list` permission on -// the parent and the filter must contain both a `parent.type` and a -// `parent.id` restriction (example: "parent.type:folder -// parent.id:123"). In this case an alternate search index is used which -// provides more consistent results. +// | labels.color:red labels.size:big | The project's label `color` has +// the value `red` and its label `size` has the value `big`.| | +// lifecycleState:DELETE_REQUESTED | Only show projects that are pending +// deletion.| If no filter is specified, the call will return projects +// for which the user has the `resourcemanager.projects.get` permission. +// NOTE: To perform a by-parent query (eg., what projects are directly +// in a Folder), the caller must have the +// `resourcemanager.projects.list` permission on the parent and the +// filter must contain both a `parent.type` and a `parent.id` +// restriction (example: "parent.type:folder parent.id:123"). In this +// case an alternate search index is used which provides more consistent +// results. func (c *ProjectsListCall) Filter(filter string) *ProjectsListCall { c.urlParams_.Set("filter", filter) return c @@ -7025,7 +7025,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7091,7 +7091,7 @@ func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsRespon // "parameterOrder": [], // "parameters": { // "filter": { - // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter strings: | Filter | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. 
| | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` | : : has the value `red` and its : : : label`size` has the value : : : `big`. : | lifecycleState:DELETE_REQUESTED | Only show projects that are | : : pending deletion. : If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). In this case an alternate search index is used which provides more consistent results.", + // "description": "Optional. An expression for filtering the results of the request. Filter rules are case insensitive. If multiple fields are included in a filter query, the query will return results that match any of the fields. Some eligible fields for filtering are: + `name` + `id` + `labels.` (where *key* is the name of a label) + `parent.type` + `parent.id` + `lifecycleState` Some examples of filter queries: | Query | Description | |------------------|-----------------------------------------------------| | name:how* | The project's name starts with \"how\". | | name:Howl | The project's name is `Howl` or `howl`. | | name:HOWL | Equivalent to above. | | NAME:howl | Equivalent to above. | | labels.color:* | The project has the label `color`. | | labels.color:red | The project's label `color` has the value `red`. | | labels.color:red labels.size:big | The project's label `color` has the value `red` and its label `size` has the value `big`.| | lifecycleState:DELETE_REQUESTED | Only show projects that are pending deletion.| If no filter is specified, the call will return projects for which the user has the `resourcemanager.projects.get` permission. NOTE: To perform a by-parent query (eg., what projects are directly in a Folder), the caller must have the `resourcemanager.projects.list` permission on the parent and the filter must contain both a `parent.type` and a `parent.id` restriction (example: \"parent.type:folder parent.id:123\"). 
In this case an alternate search index is used which provides more consistent results.", // "location": "query", // "type": "string" // }, @@ -7189,7 +7189,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7356,7 +7356,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7558,7 +7558,7 @@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7702,7 +7702,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7850,7 +7850,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7996,7 +7996,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8139,7 +8139,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 1f635e430c..855604b75d 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -7,6 +7,7 @@ package internal import ( "context" "encoding/json" + "errors" "fmt" "io/ioutil" @@ -62,56 +63,68 @@ const ( serviceAccountKey = "service_account" ) -// 
credentialsFromJSON returns a google.Credentials based on the input. +// credentialsFromJSON returns a google.Credentials from the JSON data // -// - A self-signed JWT auth flow will be executed if: the data file is a service -// account, no user are scopes provided, an audience is provided, a user -// specified endpoint is not provided, and credentials will not be -// impersonated. +// - A self-signed JWT flow will be executed if the following conditions are +// met: +// (1) At least one of the following is true: +// (a) No scope is provided +// (b) Scope for self-signed JWT flow is enabled +// (c) Audiences are explicitly provided by users +// (2) No service account impersontation // -// - Otherwise, executes a stanard OAuth 2.0 flow. +// - Otherwise, executes standard OAuth 2.0 flow +// More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + // By default, a standard OAuth 2.0 token source is created cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) if err != nil { return nil, err } - // Standard OAuth 2.0 Flow - if len(data) == 0 || - len(ds.Scopes) > 0 || - (ds.DefaultAudience == "" && len(ds.Audiences) == 0) || - ds.ImpersonationConfig != nil || - ds.Endpoint != "" { - return cred, nil - } - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. - } - if err := json.Unmarshal(cred.JSON, &f); err != nil { + // Override the token source to use self-signed JWT if conditions are met + isJWTFlow, err := isSelfSignedJWTFlow(data, ds) + if err != nil { return nil, err } - if f.Type == serviceAccountKey { - ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences) + if isJWTFlow { + ts, err := selfSignedJWTTokenSource(data, ds) if err != nil { return nil, err } cred.TokenSource = ts } + return cred, err } -func selfSignedJWTTokenSource(data []byte, defaultAudience string, audiences []string) (oauth2.TokenSource, error) { - audience := defaultAudience - if len(audiences) > 0 { - // TODO(shinfan): Update golang oauth to support multiple audiences. - if len(audiences) > 1 { - return nil, fmt.Errorf("multiple audiences support is not implemented") +func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && + ds.ImpersonationConfig == nil { + // Check if JSON is a service account and if so create a self-signed JWT. + var f struct { + Type string `json:"type"` + // The rest JSON fields are omitted because they are not used. } - audience = audiences[0] + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil + } + return false, nil +} + +func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { + if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { + // Scopes are preferred in self-signed JWT unless the scope is not available + // or a custom audience is used. + return google.JWTAccessTokenSourceWithScope(data, ds.GetScopes()...) 
+ } else if ds.GetAudience() != "" { + // Fallback to audience if scope is not provided + return google.JWTAccessTokenSourceFromJSON(data, ds.GetAudience()) + } else { + return nil, errors.New("neither scopes or audience are available for the self-signed JWT") } - return google.JWTAccessTokenSourceFromJSON(data, audience) } // QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 0ae1cb9778..89c7bc86fa 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -24,6 +24,7 @@ type DialSettings struct { DefaultMTLSEndpoint string Scopes []string DefaultScopes []string + EnableJwtWithScope bool TokenSource oauth2.TokenSource Credentials *google.Credentials CredentialsFile string // if set, Token Source is ignored. @@ -60,6 +61,19 @@ func (ds *DialSettings) GetScopes() []string { return ds.DefaultScopes } +// GetAudience returns the user-provided audience, if set, or else falls back to the default audience. +func (ds *DialSettings) GetAudience() string { + if ds.HasCustomAudience() { + return ds.Audiences[0] + } + return ds.DefaultAudience +} + +// HasCustomAudience returns true if a custom audience is provided by users. +func (ds *DialSettings) HasCustomAudience() bool { + return len(ds.Audiences) > 0 +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 1fff22fd5d..ed0b7aaf13 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -94,3 +94,15 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) { o.DefaultScopes = make([]string, len(w)) copy(o.DefaultScopes, w) } + +// EnableJwtWithScope returns a ClientOption that specifies if scope can be used +// with self-signed JWT. 
+func EnableJwtWithScope() option.ClientOption { + return enableJwtWithScope(true) +} + +type enableJwtWithScope bool + +func (w enableJwtWithScope) Apply(o *internal.DialSettings) { + o.EnableJwtWithScope = bool(w) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 6b042b6add..15fb615af4 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2454,7 +2454,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2607,7 +2607,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2776,7 +2776,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2951,7 +2951,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3117,7 +3117,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3296,7 +3296,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3484,7 +3484,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3665,7 +3665,7 @@ func (c *BucketsGetCall) Header() 
http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3873,7 +3873,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4092,7 +4092,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4351,7 +4351,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4565,7 +4565,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4802,7 +4802,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5033,7 +5033,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5211,7 +5211,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5453,7 +5453,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5665,7 +5665,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5787,7 +5787,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5940,7 +5940,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6110,7 +6110,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6302,7 +6302,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6480,7 +6480,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6659,7 +6659,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6834,7 +6834,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6985,7 +6985,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7157,7 +7157,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7334,7 +7334,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7514,7 +7514,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7693,7 +7693,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7888,7 +7888,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8089,7 +8089,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8281,7 +8281,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8486,7 +8486,7 @@ func (c 
*ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8729,7 +8729,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9085,7 +9085,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9417,7 +9417,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9654,7 +9654,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9908,7 +9908,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10228,7 +10228,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10603,7 +10603,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10924,7 +10924,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11329,7 +11329,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11636,7 +11636,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11841,7 +11841,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12106,7 +12106,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12426,7 +12426,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12645,7 +12645,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12798,7 +12798,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12937,7 +12937,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13139,7 +13139,7 @@ func (c 
*ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13338,7 +13338,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13517,7 +13517,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210606") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210721") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index bccee044b0..191bea48c8 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/annotations.proto @@ -23,7 +23,6 @@ package annotations import ( reflect "reflect" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var file_google_api_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 15b02156da..66fdb650f4 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/client.proto @@ -23,7 +23,6 @@ package annotations import ( reflect "reflect" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 07d8486dba..164e0df0bf 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.27.1 // protoc v3.12.2 // source: google/api/field_behavior.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // An indicator of the behavior of a given field (for example, that a field // is required in requests, or given as output but ignored as input). // This **does not** change the behavior in protocol buffers itself; it only @@ -78,6 +73,11 @@ const ( // in any arbitrary order, rather than the order the user originally // provided. Additionally, the list's order may or may not be stable. FieldBehavior_UNORDERED_LIST FieldBehavior = 6 + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 ) // Enum value maps for FieldBehavior. 
@@ -90,6 +90,7 @@ var ( 4: "INPUT_ONLY", 5: "IMMUTABLE", 6: "UNORDERED_LIST", + 7: "NON_EMPTY_DEFAULT", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -99,6 +100,7 @@ var ( "INPUT_ONLY": 4, "IMMUTABLE": 5, "UNORDERED_LIST": 6, + "NON_EMPTY_DEFAULT": 7, } ) @@ -167,7 +169,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0x8f, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -176,20 +178,22 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, - 0x06, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, - 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, - 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65, + 
0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 6c56d61921..4f34ab73cb 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/http.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 32d5de97f0..3571ad634f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/resource.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // A description of the historical or future-looking state of the // resource pattern. 
type ResourceDescriptor_History int32 diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 2ca10f8367..b47efe82b2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/api/httpbody.proto @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Message that represents an arbitrary HTTP body. It should only be used for // payload formats that can't be represented as JSON, such as raw binary or // an HTML page. diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index ca78f7881f..069b2c849e 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/bigtable_instance_admin.proto package admin @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" longrunning "google.golang.org/genproto/googleapis/longrunning" @@ -46,10 +45,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Request message for BigtableInstanceAdmin.CreateInstance. type CreateInstanceRequest struct { state protoimpl.MessageState diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go index 65676e121d..314c221715 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0-devel +// protoc-gen-go v1.26.0 // protoc v3.12.2 // source: google/bigtable/admin/v2/bigtable_table_admin.proto @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" longrunning "google.golang.org/genproto/googleapis/longrunning" @@ -47,10 +46,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The request for // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. type RestoreTableRequest struct { diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go index 9910cc5a66..3fec9fbe22 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/common.proto package admin @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Storage media types for persisting Bigtable data. type StorageType int32 diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go index bc35069356..3e3603a465 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/instance.proto package admin @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Possible states of an instance. 
type Instance_State int32 diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go index 113eb905f1..d532e544cb 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/admin/v2/table.proto package admin @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -40,10 +39,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Indicates the type of the restore source. type RestoreSourceType int32 diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go index a2bb439178..465ef8a96a 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/v2/bigtable.proto package bigtable @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" grpc "google.golang.org/grpc" @@ -43,10 +42,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Request message for Bigtable.ReadRows. type ReadRowsRequest struct { state protoimpl.MessageState diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go index 3097440044..746389aee9 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/bigtable/v2/data.proto package bigtable @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Specifies the complete (requested) contents of a single row of a table. 
// Rows which exceed 256MiB in size cannot be read in full. type Row struct { diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go index 8a0cca603e..f62ccc7268 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/iam/v1/iam_policy.proto package iam @@ -26,7 +26,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { state protoimpl.MessageState diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go index f8f9fb0e92..22763ad963 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/iam/v1/options.proto package iam @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { state protoimpl.MessageState diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index 78fa900862..f0a3048dba 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -15,8 +15,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/iam/v1/policy.proto package iam @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" expr "google.golang.org/genproto/googleapis/type/expr" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -39,10 +38,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The type of action performed on a Binding in a policy. 
type BindingDelta_Action int32 diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go index 8ba30e6b5f..44b30dca54 100644 --- a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/longrunning/operations.proto package longrunning @@ -25,7 +25,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" status "google.golang.org/genproto/googleapis/rpc/status" grpc "google.golang.org/grpc" @@ -46,10 +45,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // This resource represents a long-running operation that is the result of a // network API call. type Operation struct { diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 386fd7b13c..1258803152 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/rpc/code.proto package code @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The canonical error codes for gRPC APIs. // // diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index e79a538846..f34a38e4e9 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/rpc/status.proto package status @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. 
It is // used by [gRPC](https://github.com/grpc). Each `Status` message contains diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index cfe3342014..2857e26eb3 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0-devel -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.12.2 // source: google/type/expr.proto package expr @@ -24,7 +24,6 @@ import ( reflect "reflect" sync "sync" - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Represents a textual expression in the Common Expression Language (CEL) // syntax. CEL is a C-like expression language. The syntax and semantics of CEL // are documented at https://github.com/google/cel-spec. diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go index 6338c44dc4..d34efa9b1c 100644 --- a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -128,6 +128,9 @@ func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *file protocVersion := "(unknown)" if v := gen.Request.GetCompilerVersion(); v != nil { protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + if s := v.GetSuffix(); s != "" { + protocVersion += "-" + s + } } g.P("// \tprotoc-gen-go ", protocGenGoVersion) g.P("// \tprotoc ", protocVersion) @@ -444,7 +447,15 @@ func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageIn case protoreflect.EnumKind: idx := field.Desc.DefaultEnumValue().Index() val := field.Enum.Values[idx] - consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + if val.GoIdent.GoImportPath == f.GoImportPath { + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. + consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s", + name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent))) + } case protoreflect.FloatKind, protoreflect.DoubleKind: if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { var fn, arg string @@ -527,25 +538,6 @@ func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInf g.P() f.needRawDesc = true } - - // ExtensionRangeArray method. 
- extRanges := m.Desc.ExtensionRanges() - if m.genExtRangeMethod && extRanges.Len() > 0 { - protoExtRange := protoifacePackage.Ident("ExtensionRangeV1") - extRangeVar := "extRange_" + m.GoIdent.GoName - g.P("var ", extRangeVar, " = []", protoExtRange, " {") - for i := 0; i < extRanges.Len(); i++ { - r := extRanges.Get(i) - g.P("{Start:", r[0], ", End:", r[1]-1 /* inclusive */, "},") - } - g.P("}") - g.P() - g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor.ExtensionRanges instead.") - g.P("func (*", m.GoIdent, ") ExtensionRangeArray() []", protoExtRange, " {") - g.P("return ", extRangeVar) - g.P("}") - g.P() - } } func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { @@ -566,7 +558,7 @@ func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageI // Getter for message field. goType, pointer := fieldGoType(g, f, field) - defaultValue := fieldDefaultValue(g, m, field) + defaultValue := fieldDefaultValue(g, f, m, field) g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) leadingComments := appendDeprecationSuffix("", field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) @@ -688,7 +680,7 @@ func fieldProtobufTagValue(field *protogen.Field) string { return tag.Marshal(field.Desc, enumName) } -func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protogen.Field) string { +func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string { if field.Desc.IsList() { return "nil" } @@ -707,7 +699,15 @@ func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protoge case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: return "nil" case protoreflect.EnumKind: - return g.QualifiedGoIdent(field.Enum.Values[0].GoIdent) + val := field.Enum.Values[0] + if val.GoIdent.GoImportPath == f.GoImportPath { + return g.QualifiedGoIdent(val.GoIdent) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. + return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")" + } default: return "0" } diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go index 17cbe1a391..2ee676fbb9 100644 --- a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -251,12 +251,13 @@ func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { "\t• a \"M\" argument on the command line.\n\n"+ "See %v for more information.\n", fdesc.GetName(), goPackageDocURL) - case !strings.Contains(string(importPaths[filename]), "/"): - // Check that import paths contain at least one slash to avoid a - // common mistake where import path is confused with package name. + case !strings.Contains(string(importPaths[filename]), ".") && + !strings.Contains(string(importPaths[filename]), "/"): + // Check that import paths contain at least a dot or slash to avoid + // a common mistake where import path is confused with package name. 
return nil, fmt.Errorf( "invalid Go import path %q for %q\n\n"+ - "The import path must contain at least one forward slash ('/') character.\n\n"+ + "The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+ "See %v for more information.\n", string(importPaths[filename]), fdesc.GetName(), goPackageDocURL) case packageNames[filename] == "": diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 8fb1d9e086..179d6e8fc1 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -744,9 +744,6 @@ func (d decoder) skipValue() error { // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. - if err := d.skipValue(); err != nil { - return err - } } } } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index aa66bdd06a..da289ccce6 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -263,3 +263,8 @@ func (e *Encoder) Snapshot() encoderState { func (e *Encoder) Reset(es encoderState) { e.encoderState = es } + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 3759b010c0..029feeefd7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -440,6 +440,13 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { if !ok { return piface.MergeOutput{} } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } b, err := marshaler.Marshal() if err != nil { return piface.MergeOutput{} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 5879131da7..14e774fb2e 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,8 +52,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 26 - Patch = 0 + Minor = 27 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 66dcbcd0d2..59f024c444 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -94,7 +94,8 @@ type Files struct { // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
descsByName map[protoreflect.FullName]interface{} - filesByPath map[string]protoreflect.FileDescriptor + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int } type packageDescriptor struct { @@ -117,17 +118,16 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { r.descsByName = map[protoreflect.FullName]interface{}{ "": &packageDescriptor{}, } - r.filesByPath = make(map[string]protoreflect.FileDescriptor) + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) } path := file.Path() - if prev := r.filesByPath[path]; prev != nil { + if prev := r.filesByPath[path]; len(prev) > 0 { r.checkGenProtoConflict(path) err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err } - return err } for name := file.Package(); name != ""; name = name.Parent() { @@ -168,7 +168,8 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { r.descsByName[d.FullName()] = d }) - r.filesByPath[path] = file + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ return nil } @@ -308,6 +309,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // // This returns (nil, NotFound) if not found. +// This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { return nil, NotFound @@ -316,13 +318,19 @@ func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) globalMutex.RLock() defer globalMutex.RUnlock() } - if fd, ok := r.filesByPath[path]; ok { - return fd, nil + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) } - return nil, NotFound } -// NumFiles reports the number of registered files. +// NumFiles reports the number of registered files, +// including duplicate files with the same name. func (r *Files) NumFiles() int { if r == nil { return 0 @@ -331,10 +339,11 @@ func (r *Files) NumFiles() int { globalMutex.RLock() defer globalMutex.RUnlock() } - return len(r.filesByPath) + return r.numFiles } // RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. // The iteration order is undefined. 
func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { if r == nil { @@ -344,9 +353,11 @@ func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { globalMutex.RLock() defer globalMutex.RUnlock() } - for _, file := range r.filesByPath { - if !f(file) { - return + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } } } } diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index f77239fc3b..abe4ab5115 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -43,7 +43,6 @@ package descriptorpb import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -829,15 +828,6 @@ func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} } -var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ExtensionRangeOptions -} - func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1520,15 +1510,6 @@ func (*FileOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} } -var extRange_FileOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FileOptions -} - func (x *FileOptions) GetJavaPackage() string { if x != nil && x.JavaPackage != nil { return *x.JavaPackage @@ -1776,15 +1757,6 @@ func (*MessageOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} } -var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MessageOptions -} - func (x *MessageOptions) GetMessageSetWireFormat() bool { if x != nil && x.MessageSetWireFormat != nil { return *x.MessageSetWireFormat @@ -1930,15 +1902,6 @@ func (*FieldOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} } -var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_FieldOptions -} - func (x *FieldOptions) GetCtype() FieldOptions_CType { if x != nil && x.Ctype != nil { return *x.Ctype @@ -2030,15 +1993,6 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } -var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_OneofOptions -} - func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2101,15 +2055,6 @@ func (*EnumOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} } -var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumOptions -} - func (x *EnumOptions) GetAllowAlias() bool { if x != nil && x.AllowAlias != nil { return *x.AllowAlias @@ -2183,15 +2128,6 @@ func (*EnumValueOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} } -var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_EnumValueOptions -} - func (x *EnumValueOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2258,15 +2194,6 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } -var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ServiceOptions -} - func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2335,15 +2262,6 @@ func (*MethodOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} } -var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ - {Start: 1000, End: 536870911}, -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. 
-func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_MethodOptions -} - func (x *MethodOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated diff --git a/vendor/modules.txt b/vendor/modules.txt index 796f6e478e..23593e3410 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go v0.83.0 +# cloud.google.com/go v0.87.0 cloud.google.com/go cloud.google.com/go/compute/metadata cloud.google.com/go/iam @@ -65,7 +65,7 @@ github.com/armon/go-metrics github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.38.60 +# github.com/aws/aws-sdk-go v1.40.10 ## explicit github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -142,6 +142,8 @@ github.com/coreos/go-systemd/v22/journal github.com/coreos/pkg/capnslog # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/dennwc/varint v1.0.0 +github.com/dennwc/varint # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f github.com/dgryski/go-rendezvous # github.com/dimchansky/utfbom v1.1.1 @@ -257,7 +259,7 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/golang/snappy v0.0.3 +# github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy # github.com/google/btree v1.0.0 @@ -268,13 +270,13 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9 +# github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303 github.com/google/pprof/profile # github.com/google/uuid v1.2.0 github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 -# github.com/gorilla/mux v1.7.3 +# github.com/gorilla/mux v1.8.0 ## explicit github.com/gorilla/mux # github.com/gorilla/websocket v1.4.2 @@ -300,7 +302,7 @@ github.com/grpc-ecosystem/grpc-gateway/runtime github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.8.1 +# github.com/hashicorp/consul/api v1.9.1 ## explicit github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.0.0 @@ -358,7 +360,7 @@ github.com/leanovate/gopter/prop github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mattn/go-colorable v0.1.6 +# github.com/mattn/go-colorable v0.1.8 github.com/mattn/go-colorable # github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe github.com/mattn/go-ieproxy @@ -366,7 +368,7 @@ github.com/mattn/go-ieproxy github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.42 +# github.com/miekg/dns v1.1.43 github.com/miekg/dns # github.com/minio/md5-simd v1.1.0 github.com/minio/md5-simd @@ -479,7 +481,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint # github.com/prometheus/client_model v0.2.0 ## explicit github.com/prometheus/client_model/go -# github.com/prometheus/common v0.29.0 +# github.com/prometheus/common v0.30.0 ## explicit 
github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -493,7 +495,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20210720123808-b1ed4a0a663d => github.com/grafana/prometheus-private v0.0.0-20210720123808-b1ed4a0a663d +# github.com/prometheus/prometheus v1.8.2-0.20210720123808-b1ed4a0a663d => github.com/grafana/prometheus-private v0.0.0-20210813131603-a3ad21265842 ## explicit github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -532,6 +534,7 @@ github.com/prometheus/prometheus/tsdb/tombstones github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wal github.com/prometheus/prometheus/util/httputil +github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/stats github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/teststorage @@ -561,7 +564,7 @@ github.com/soheilhy/cmux # github.com/sony/gobreaker v0.4.1 ## explicit github.com/sony/gobreaker -# github.com/spf13/afero v1.2.2 +# github.com/spf13/afero v1.3.4 ## explicit github.com/spf13/afero github.com/spf13/afero/mem @@ -824,7 +827,7 @@ golang.org/x/lint/golint # golang.org/x/mod v0.4.2 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20210610132358-84b48f89b13b +# golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 ## explicit golang.org/x/net/bpf golang.org/x/net/context @@ -842,7 +845,7 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c +# golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -855,7 +858,7 @@ golang.org/x/oauth2/jwt ## explicit golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 +# golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader @@ -868,11 +871,10 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 +# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac ## explicit golang.org/x/time/rate # golang.org/x/tools v0.1.5 -## explicit golang.org/x/tools/cmd/goimports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -889,7 +891,7 @@ golang.org/x/tools/internal/typeparams # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.48.0 +# google.golang.org/api v0.51.0 ## explicit google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/googleapi @@ -920,7 +922,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 +# google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/bigtable/admin/v2 @@ -931,7 +933,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status 
google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.38.0 +# google.golang.org/grpc v1.39.0 => google.golang.org/grpc v1.38.0 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -994,7 +996,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.26.0 +# google.golang.org/protobuf v1.27.1 google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen google.golang.org/protobuf/encoding/protojson @@ -1056,6 +1058,7 @@ sigs.k8s.io/yaml # k8s.io/api => k8s.io/api v0.20.4 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab -# github.com/prometheus/prometheus => github.com/grafana/prometheus-private v0.0.0-20210720123808-b1ed4a0a663d +# github.com/prometheus/prometheus => github.com/grafana/prometheus-private v0.0.0-20210813131603-a3ad21265842 # github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 +# google.golang.org/grpc => google.golang.org/grpc v1.38.0
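As a quick sanity check of the vendored `google.golang.org/genproto` bump above, here is a minimal, illustrative Go sketch (not part of the diff) that exercises the newly added `NON_EMPTY_DEFAULT` field-behavior value; the package path and enum identifier are taken directly from the vendored `field_behavior.pb.go` changes, and the program is only an assumption of how a consumer might reference the new constant.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	// FieldBehavior_NON_EMPTY_DEFAULT is the enum value (number 7) added by
	// the genproto update vendored in this change.
	fb := annotations.FieldBehavior_NON_EMPTY_DEFAULT
	fmt.Printf("%s = %d\n", fb, int32(fb)) // prints: NON_EMPTY_DEFAULT = 7
}
```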